From c912f3bcb249baafbd64443bc011814a8657e83e Mon Sep 17 00:00:00 2001 From: noramullen1 <42354779+noramullen1@users.noreply.github.com> Date: Thu, 15 Sep 2022 09:38:16 -0700 Subject: [PATCH 01/17] Telegraf 1.24 (#4456) * 1.24 telegraf updates * Add Linux CPU plugin --- content/telegraf/v1.24/_index.md | 23 + content/telegraf/v1.24/commands.md | 92 + content/telegraf/v1.24/configuration.md | 483 ++ .../v1.24/configure_plugins/_index.md | 13 + .../aggregator_processor/_index.md | 58 + .../external_plugins/_index.md | 23 + .../external_plugins/shim.md | 60 + .../external_plugins/write_external_plugin.md | 31 + .../configure_plugins/input_plugins/_index.md | 14 + .../input_plugins/using_http.md | 106 + .../output_plugins/_index.md | 16 + .../v1.24/configure_plugins/troubleshoot.md | 51 + content/telegraf/v1.24/contribute.md | 26 + content/telegraf/v1.24/data_formats/_index.md | 16 + .../v1.24/data_formats/input/_index.md | 37 + .../v1.24/data_formats/input/collectd.md | 49 + .../telegraf/v1.24/data_formats/input/csv.md | 112 + .../v1.24/data_formats/input/dropwizard.md | 320 ++ .../v1.24/data_formats/input/graphite.md | 195 + .../telegraf/v1.24/data_formats/input/grok.md | 227 + .../v1.24/data_formats/input/influx.md | 28 + .../telegraf/v1.24/data_formats/input/json.md | 228 + .../v1.24/data_formats/input/json_v2.md | 174 + .../v1.24/data_formats/input/logfmt.md | 43 + .../v1.24/data_formats/input/nagios.md | 30 + .../input/prometheus-remote-write.md | 62 + .../v1.24/data_formats/input/value.md | 45 + .../v1.24/data_formats/input/wavefront.md | 29 + .../telegraf/v1.24/data_formats/input/xml.md | 59 + .../v1.24/data_formats/output/_index.md | 31 + .../v1.24/data_formats/output/carbon2.md | 61 + .../v1.24/data_formats/output/graphite.md | 59 + .../v1.24/data_formats/output/influx.md | 42 + .../v1.24/data_formats/output/json.md | 90 + .../v1.24/data_formats/output/messagepack.md | 49 + .../v1.24/data_formats/output/nowmetric.md | 91 + .../v1.24/data_formats/output/splunkmetric.md | 148 + content/telegraf/v1.24/get_started.md | 120 + content/telegraf/v1.24/glossary.md | 106 + content/telegraf/v1.24/install.md | 356 ++ content/telegraf/v1.24/metrics.md | 29 + content/telegraf/v1.24/plugins.md | 82 + .../telegraf/v1.24/release-notes-changelog.md | 4198 +++++++++++++++++ data/telegraf_plugins.yml | 57 + 44 files changed, 8169 insertions(+) create mode 100644 content/telegraf/v1.24/_index.md create mode 100644 content/telegraf/v1.24/commands.md create mode 100644 content/telegraf/v1.24/configuration.md create mode 100644 content/telegraf/v1.24/configure_plugins/_index.md create mode 100644 content/telegraf/v1.24/configure_plugins/aggregator_processor/_index.md create mode 100644 content/telegraf/v1.24/configure_plugins/external_plugins/_index.md create mode 100644 content/telegraf/v1.24/configure_plugins/external_plugins/shim.md create mode 100644 content/telegraf/v1.24/configure_plugins/external_plugins/write_external_plugin.md create mode 100644 content/telegraf/v1.24/configure_plugins/input_plugins/_index.md create mode 100644 content/telegraf/v1.24/configure_plugins/input_plugins/using_http.md create mode 100644 content/telegraf/v1.24/configure_plugins/output_plugins/_index.md create mode 100644 content/telegraf/v1.24/configure_plugins/troubleshoot.md create mode 100644 content/telegraf/v1.24/contribute.md create mode 100644 content/telegraf/v1.24/data_formats/_index.md create mode 100644 content/telegraf/v1.24/data_formats/input/_index.md create mode 100644 
content/telegraf/v1.24/data_formats/input/collectd.md create mode 100644 content/telegraf/v1.24/data_formats/input/csv.md create mode 100644 content/telegraf/v1.24/data_formats/input/dropwizard.md create mode 100644 content/telegraf/v1.24/data_formats/input/graphite.md create mode 100644 content/telegraf/v1.24/data_formats/input/grok.md create mode 100644 content/telegraf/v1.24/data_formats/input/influx.md create mode 100644 content/telegraf/v1.24/data_formats/input/json.md create mode 100644 content/telegraf/v1.24/data_formats/input/json_v2.md create mode 100644 content/telegraf/v1.24/data_formats/input/logfmt.md create mode 100644 content/telegraf/v1.24/data_formats/input/nagios.md create mode 100644 content/telegraf/v1.24/data_formats/input/prometheus-remote-write.md create mode 100644 content/telegraf/v1.24/data_formats/input/value.md create mode 100644 content/telegraf/v1.24/data_formats/input/wavefront.md create mode 100644 content/telegraf/v1.24/data_formats/input/xml.md create mode 100644 content/telegraf/v1.24/data_formats/output/_index.md create mode 100644 content/telegraf/v1.24/data_formats/output/carbon2.md create mode 100644 content/telegraf/v1.24/data_formats/output/graphite.md create mode 100644 content/telegraf/v1.24/data_formats/output/influx.md create mode 100644 content/telegraf/v1.24/data_formats/output/json.md create mode 100644 content/telegraf/v1.24/data_formats/output/messagepack.md create mode 100644 content/telegraf/v1.24/data_formats/output/nowmetric.md create mode 100644 content/telegraf/v1.24/data_formats/output/splunkmetric.md create mode 100644 content/telegraf/v1.24/get_started.md create mode 100644 content/telegraf/v1.24/glossary.md create mode 100644 content/telegraf/v1.24/install.md create mode 100644 content/telegraf/v1.24/metrics.md create mode 100644 content/telegraf/v1.24/plugins.md create mode 100644 content/telegraf/v1.24/release-notes-changelog.md diff --git a/content/telegraf/v1.24/_index.md b/content/telegraf/v1.24/_index.md new file mode 100644 index 000000000..a1804a1ab --- /dev/null +++ b/content/telegraf/v1.24/_index.md @@ -0,0 +1,23 @@ +--- +title: Telegraf 1.24 documentation +description: > + Documentation for Telegraf, the plugin-driven server agent of the InfluxData + time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor. +menu: + telegraf_1_24: + name: Telegraf v1.24 +weight: 1 +related: + - /resources/videos/intro-to-telegraf/ +--- + +Telegraf, a server-based agent, collects and sends metrics and events from databases, systems, and IoT sensors. +Written in Go, Telegraf compiles into a single binary with no external dependencies--requiring very minimal memory. + +For an introduction to Telegraf and an overview of how it works, watch the following video: + +{{< youtube vGJeo3FaMds >}} + +{{< influxdbu title="Telegraf Basics" summary="Learn how to get started with Telegraf with this **free** course that covers common use cases, proper configuration, and best practices for deployment. Also, discover how to write your own custom Telegraf plugins." 
action="Take the course" link="https://university.influxdata.com/courses/telegraf-basics-tutorial/" >}} + +{{< influxdbu "telegraf-102" >}} diff --git a/content/telegraf/v1.24/commands.md b/content/telegraf/v1.24/commands.md new file mode 100644 index 000000000..5557032ff --- /dev/null +++ b/content/telegraf/v1.24/commands.md @@ -0,0 +1,92 @@ +--- +title: Telegraf commands and flags +description: The `telegraf` command starts and runs all the processes necessary for Telegraf to function. +menu: + telegraf_1_24_ref: + + name: Commands + weight: 20 +--- + +The `telegraf` command starts and runs all the processes necessary for Telegraf to function. + +## Usage + +``` +telegraf [commands] +telegraf [flags] +``` + +## Commands + + + +| Command | Description | +| :-------- | :--------------------------------------------- | +| `config` | Print out full sample configuration to stdout. | +| `version` | Print version to stdout. | + +## Flags {id="telegraf-command-flags"} + +| Flag | Description | +| :------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------- | +| `--aggregator-filter ` | Filter aggregators to enable. Separator is `:`. | +| `--config ` | Configuration file to load. | +| `--config-directory ` | Directory containing additional `*.conf` files. | +| `--deprecation-list` | Print all deprecated plugins or plugin options. | +| `--watch-config` | Restart Telegraf on local configuration changes. Use either fs notifications (`inotify`) or polling (`poll`). Disabled by default | +| `--plugin-directory ` | Directory containing `*.so` files to search recursively for plugins. Found plugins are loaded, tagged, and identified. | +| `--debug` | Enable debug logging. | +| `--input-filter ` | Filter input plugins to enable. Separator is `:`. | +| `--input-list` | Print available input plugins. | +| `--output-filter` | Filter output plugins to enable. Separator is `:`. | +| `--output-list` | Print available output plugins. | +| `--pidfile ` | File to write PID to. | +| `--pprof-addr
` | pprof address to listen on. Disabled by default. | +| `--processor-filter ` | Filter processor plugins to enable. Separator is `:`. | +| `--quiet` | Run in quiet mode. | +| `--section-filter ` | Filter configuration sections to output (`agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`). Separator is `:`. | +| `--sample-config` | Print full sample configuration. | +| `--once` | Gather metrics once, write them, and exit. | +| `--test` | Gather metrics once and print them. | +| `--test-wait` | Number of seconds to wait for service inputs to complete in test or once mode. | +| `--usage ` | Print plugin usage (example: `telegraf --usage mysql`). | +| `--version` | Print Telegraf version. | + +## Examples + +### Generate a Telegraf configuration file + +```sh +telegraf config > telegraf.conf +``` + +### Generate configuration with only CPU input and InfluxDB output plugins defined + +```sh +telegraf --input-filter cpu --output-filter influxdb config +``` + +### Run a single Telegraf configuration, outputting metrics to stdout + +```sh +telegraf --config telegraf.conf --test +``` + +### Run Telegraf with all plugins defined in configuration file** + +```sh +telegraf --config telegraf.conf +``` + +### Run Telegraf, enabling the CPU and memory input plugins and InfluxDB output plugin** + +```sh +telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb +``` + +### Run Telegraf with pprof + +```sh +telegraf --config telegraf.conf --pprof-addr localhost:6060 +``` diff --git a/content/telegraf/v1.24/configuration.md b/content/telegraf/v1.24/configuration.md new file mode 100644 index 000000000..a3e62eab8 --- /dev/null +++ b/content/telegraf/v1.24/configuration.md @@ -0,0 +1,483 @@ +--- +title: Configuration options +description: Overview of the Telegraf configuration file, enabling plugins, and setting environment variables. +aliases: + - /telegraf/v1.23/administration/configuration/ +menu: + telegraf_1_24_ref: + + name: Configuration options + weight: 20 +--- + +The Telegraf configuration file (`telegraf.conf`) lists all available Telegraf plugins. See the current version here: [telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf). + +> To quickly get started with Telegraf, see [Get started](/telegraf/v1.23/get_started/). + +## Generate a configuration file + +A default Telegraf configuration file can be auto-generated by Telegraf: + +``` +telegraf config > telegraf.conf +``` + +To generate a configuration file with specific inputs and outputs, you can use the +`--input-filter` and `--output-filter` flags: + +``` +telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config +``` + +## Configuration file locations + +Use the `--config` flag to specify the configuration file location: + +- Filename and path, for example: `--config /etc/default/telegraf` +- Remote URL endpoint, for example: `--config "http://remote-URL-endpoint"` + +Use the `--config-directory` flag to include files ending with `.conf` in the specified directory in the Telegraf +configuration. + +On most systems, the default locations are `/etc/telegraf/telegraf.conf` for +the main configuration file and `/etc/telegraf/telegraf.d` for the directory of +configuration files. + +## Set environment variables + +Add environment variables anywhere in the configuration file by prepending them with `$`. +For strings, variables must be in quotes (for example, `"$STR_VAR"`). 
For numbers and Booleans, variables must be unquoted (for example, `$INT_VAR`, `$BOOL_VAR`).

You can also set environment variables using the Linux `export` command: `export password=mypassword`

> **Note:** We recommend using environment variables for sensitive information.

### Example: Telegraf environment variables

In the Telegraf environment variables file (`/etc/default/telegraf`):

```sh
USER="alice"
INFLUX_URL="http://localhost:8086"
INFLUX_SKIP_DATABASE_CREATION="true"
INFLUX_PASSWORD="monkey123"
```

In the Telegraf configuration file (`/etc/telegraf/telegraf.conf`):

```toml
[global_tags]
  user = "${USER}"

[[inputs.mem]]

[[outputs.influxdb]]
  urls = ["${INFLUX_URL}"]
  skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION}
  password = "${INFLUX_PASSWORD}"
```

The environment variables above add the following configuration settings to Telegraf:

```toml
[global_tags]
  user = "alice"

[[outputs.influxdb]]
  urls = ["http://localhost:8086"]
  skip_database_creation = true
  password = "monkey123"
```

## Global tags

Global tags can be specified in the `[global_tags]` section of the config file
in `key="value"` format. All metrics gathered on this host are tagged
with the tags specified here.

## Agent configuration

Telegraf has a few options you can configure under the `[agent]` section of the
config:

* **interval**: Default data collection interval for all inputs.
* **round_interval**: Rounds collection interval to `interval`.
For example, if `interval` is set to 10s, collection always happens on :00, :10, :20, and so on.
* **metric_batch_size**: Telegraf sends metrics to outputs in batches of at
most `metric_batch_size` metrics.
* **metric_buffer_limit**: Telegraf caches `metric_buffer_limit` metrics
for each output, and flushes this buffer on a successful write.
This should be a multiple of `metric_batch_size` and no less
than 2 times `metric_batch_size`.
* **collection_jitter**: Jitters the collection by a random amount.
Each plugin sleeps for a random time within the jitter before collecting.
This can be used to avoid many plugins querying things like sysfs at the
same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
You should not set this below `interval`.
The maximum flush time is `flush_interval` + `flush_jitter`.
* **flush_jitter**: Jitters the flush interval by a random amount.
This is primarily to avoid
large write spikes for users running a large number of Telegraf instances.
For example, a `flush_jitter` of 5s and `flush_interval` of 10s means flushes happen every 10-15s.
* **precision**: Collected metrics are rounded to the precision specified as an
`interval` (integer + unit, for example: `1ns`, `1us`, `1ms`, or `1s`). Precision is NOT
used for service inputs, such as `logparser` and `statsd`.
* **debug**: Run Telegraf in debug mode.
* **quiet**: Run Telegraf in quiet mode (error messages only).
* **logtarget**: Controls the destination for logs. Can be one of "file",
"stderr", or, on Windows, "eventlog". When set to "file", the output file is
determined by the "logfile" setting.
* **logfile**: Name of the file to log to when using the "file" logtarget. If set
to the empty string, logs are written to stderr.
* **logfile_rotation_interval**: Rotates the logfile after the specified time interval. When
set to 0, no time-based rotation is performed.
* **logfile_rotation_max_size**: Rotates the logfile when it becomes larger than the specified
size. When set to 0, no size-based rotation is performed.
* **logfile_rotation_max_archives**: Maximum number of rotated archives to keep; any
older logs are deleted. If set to -1, no archives are removed.
* **log_with_timezone**: Set a timezone to use when logging, or use 'local' for local time. Example: 'America/Chicago'.
  [See this page for options/formats.](https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt)
* **hostname**: Override the default hostname; if empty, use `os.Hostname()`.
* **omit_hostname**: If true, do not set the `host` tag in the Telegraf agent.

## Input configuration

The following config parameters are available for all inputs:

* **alias**: Name an instance of a plugin.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here. `interval` can be increased to reduce data-in rate limits.
* **precision**: Overrides the `precision` setting of the agent. Collected
metrics are rounded to the precision specified as an `interval`. When this value is
set on a service input (for example, `statsd`), multiple events occurring at the same
timestamp may be merged by the output database.
* **collection_jitter**: Overrides the `collection_jitter` setting of the agent.
Collection jitter is used to jitter the collection by a random `interval`.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

## Output configuration

The following config parameters are available for all outputs:

* **alias**: Name an instance of a plugin.
* **flush_interval**: Maximum time between flushes. Use this setting to
  override the agent `flush_interval` on a per plugin basis.
* **flush_jitter**: Amount of time to jitter the flush interval. Use this
  setting to override the agent `flush_jitter` on a per plugin basis.
* **metric_batch_size**: Maximum number of metrics to send at once. Use
  this setting to override the agent `metric_batch_size` on a per plugin basis.
* **metric_buffer_limit**: Maximum number of unsent metrics to buffer.
  Use this setting to override the agent `metric_buffer_limit` on a per plugin basis.
* **name_override**: Override the base name of the measurement.
(Default is the name of the output).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.

## Aggregator configuration

The following config parameters are available for all aggregators:

* **alias**: Name an instance of a plugin.
* **period**: The period on which to flush & clear each aggregator. All metrics
that are sent with timestamps outside of this period will be ignored by the
aggregator.
* **delay**: The delay before each aggregator is flushed. This controls
how long aggregators wait before receiving metrics from input plugins, in the
case that aggregators are flushing and inputs are gathering on the same interval.
* **grace**: The duration for which metrics outside the aggregation period are
still aggregated by the plugin. Use this setting when the agent is expected
to receive late metrics, so they can be rolled into the next aggregation period.
* **drop_original**: If true, the original metric is dropped by the
aggregator and not sent to the output plugins.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

For a demonstration of how to configure SNMP, MQTT, and PostgreSQL plugins to get data into Telegraf, see the following video:

{{< youtube 6XJdZ_kdx14 >}}

## Processor configuration

The following config parameters are available for all processors:

* **alias**: Name an instance of a plugin.
* **order**: The order in which processors are executed. If this
is not specified, processor execution order is random.

The [metric filtering](#metric-filtering) parameters can be used to limit what metrics are
handled by the processor. Excluded metrics are passed downstream to the next
processor.

## Metric filtering

Filters can be configured per input, output, processor, or aggregator;
see the examples below.

* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found, the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns are discarded from the point.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns are emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found, the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which passes an entire
point based on its tag, `taginclude` removes all non-matching tags from the
point. This filter can be used on both inputs and outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
are discarded from the point.

**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.

To learn more about metric filtering, watch the following video:

{{< youtube R3DnObs_OKA >}}

## Examples

#### Input configuration examples

This is a full working config that outputs CPU data to an InfluxDB instance
at `192.168.59.103:8086`, tagging measurements with `dc="denver-1"`.
It outputs
measurements at a 10s interval and collects per-cpu data, dropping any
fields that begin with `time_`.

```toml
[global_tags]
  dc = "denver-1"

[agent]
  interval = "10s"

# OUTPUTS
[[outputs.influxdb]]
  urls = ["http://192.168.59.103:8086"] # required.
  database = "telegraf" # required.
  precision = "1s"

# INPUTS
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  fielddrop = ["time_*"]
```

#### Input config: `tagpass` and `tagdrop`

**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  fielddrop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
  [inputs.disk.tagpass]
    # tagpass conditions are OR, not AND.
    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
    # then the metric passes
    fstype = [ "ext4", "xfs" ]
    # Globs can also be used on the tag values
    path = [ "/opt", "/home*" ]
```

#### Input config: `fieldpass` and `fielddrop`

```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  fielddrop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
  fieldpass = ["inodes*"]
```

#### Input config: `namepass` and `namedrop`

```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namedrop = ["container_*"]

# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namepass = ["rest_client_*"]
```

#### Input config: `taginclude` and `tagexclude`

```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  taginclude = ["cpu"]

# Exclude the `fstype` tag from the measurements for the disk plugin.
[[inputs.disk]]
  tagexclude = ["fstype"]
```

#### Input config: `prefix`, `suffix`, and `override`

This plugin emits measurements with the name `cpu_total`.

```toml
[[inputs.cpu]]
  name_suffix = "_total"
  percpu = false
  totalcpu = true
```

This emits measurements with the name `foobar`.

```toml
[[inputs.cpu]]
  name_override = "foobar"
  percpu = false
  totalcpu = true
```

#### Input config: tags

This plugin emits measurements with two additional tags: `tag1=foo` and
`tag2=bar`.

NOTE: Order matters; the `[inputs.cpu.tags]` table must be at the _end_ of the
plugin definition.

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  [inputs.cpu.tags]
    tag1 = "foo"
    tag2 = "bar"
```

#### Multiple inputs of the same type

Additional inputs (or outputs) of the same type can be specified by defining these instances in the configuration file.
To avoid measurement collisions, use the `name_override`, `name_prefix`, or `name_suffix` config options:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  fielddrop = ["cpu_time*"]
```

#### Output configuration examples

```toml
[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf"
  precision = "1s"
  # Drop all measurements that start with "aerospike"
  namedrop = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-aerospike-data"
  precision = "1s"
  # Only accept aerospike data:
  namepass = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-cpu0-data"
  precision = "1s"
  # Only store measurements where the tag "cpu" matches the value "cpu0"
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```

#### Aggregator configuration examples

This collects and emits the min/max of the system load1 metric every
30s, dropping the originals.

```toml
[[inputs.system]]
  fieldpass = ["load1"] # collects system load1 metric.

[[aggregators.minmax]]
  period = "30s" # send & clear the aggregate every 30s.
  drop_original = true # drop the original metrics.

[[outputs.file]]
  files = ["stdout"]
```

This collects and emits the min/max of the swap metrics every
30s, dropping the originals. The aggregator is not applied
to the system load metrics due to the `namepass` parameter.

```toml
[[inputs.swap]]

[[inputs.system]]
  fieldpass = ["load1"] # collects system load1 metric.

[[aggregators.minmax]]
  period = "30s" # send & clear the aggregate every 30s.
  drop_original = true # drop the original metrics.
  namepass = ["swap"] # only "pass" swap metrics through the aggregator.

[[outputs.file]]
  files = ["stdout"]
```

To learn more about configuring the Telegraf agent, watch the following video:

{{< youtube txUcAxMDBlQ >}}
diff --git a/content/telegraf/v1.24/configure_plugins/_index.md b/content/telegraf/v1.24/configure_plugins/_index.md
new file mode 100644
index 000000000..4b5128484
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/_index.md
@@ -0,0 +1,13 @@
---
title: Configure plugins
description:
menu:
  telegraf_1_24:
    name: Configure plugins
    weight: 50
---

Telegraf is a server-based agent for collecting and sending metrics and events from databases, systems, and IoT sensors.

{{< children hlevel="h2" >}}
diff --git a/content/telegraf/v1.24/configure_plugins/aggregator_processor/_index.md b/content/telegraf/v1.24/configure_plugins/aggregator_processor/_index.md
new file mode 100644
index 000000000..23df07bcc
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/aggregator_processor/_index.md
@@ -0,0 +1,58 @@
---
title: Transform data with aggregator and processor plugins
description: |
  Aggregator and processor plugins aggregate and process metrics.
menu:
  telegraf_1_24:
    name: Aggregator and processor plugins
    weight: 50
    parent: Configure plugins
---

In addition to input plugins and output plugins, Telegraf includes aggregator and processor plugins, which are used to aggregate and process metrics as they pass through Telegraf.

{{< diagram >}}
 graph TD
  Process[Process<br> - transform<br> - decorate<br> - filter]
  Aggregate[Aggregate<br> - transform<br> - decorate<br> - filter]

  CPU --> Process
  Memory --> Process
  MySQL --> Process
  SNMP --> Process
  Docker --> Process
  Process --> Aggregate
  Aggregate --> InfluxDB
  Aggregate --> File
  Aggregate --> Kafka

style Process text-align:left
style Aggregate text-align:left
{{< /diagram >}}

**Processor plugins** process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.

**Aggregator plugins**, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
Since many users will only care about their aggregates and not every single metric
gathered, there is also a `drop_original` argument, which tells Telegraf to only
emit the aggregates and not the original metrics.

{{% note %}}
#### Behavior of processors and aggregators when used together
When using both aggregator and processor plugins, processor plugins
process data and then pass it to aggregator plugins.
After aggregator plugins aggregate the data, they pass it back to processor plugins.
This can have unintended consequences, such as executing mathematical operations twice.
_See [influxdata/telegraf#7993](https://github.com/influxdata/telegraf/issues/7993)._

If using custom processor scripts, they must be idempotent (repeatable, without side effects).
For custom processors that are not idempotent, use [namepass or namedrop](/telegraf/v1.24/configuration/#input-config-namepass-and-namedrop) to avoid issues when aggregated data is processed a second time.
{{% /note %}}
diff --git a/content/telegraf/v1.24/configure_plugins/external_plugins/_index.md b/content/telegraf/v1.24/configure_plugins/external_plugins/_index.md
new file mode 100644
index 000000000..492513f87
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/external_plugins/_index.md
@@ -0,0 +1,23 @@
---
title: Integrate with external plugins
description: |
  External plugins are programs built outside of Telegraf that run through an `execd` plugin.
menu:
  telegraf_1_24:
    name: External plugins
    weight: 50
    parent: Configure plugins
---

[External plugins](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md) are programs built outside
of Telegraf that run through one of the `execd` plugins. External plugins allow for
more flexibility compared to internal Telegraf plugins.
Benefits to using external plugins include:
- Access to libraries not written in Go
- Using licensed software (not available to the open source community)
- Including large dependencies that would otherwise bloat Telegraf
- Using your external plugin immediately, without waiting for the Telegraf team to publish it
- Easily converting plugins between internal and external using the [shim](https://github.com/influxdata/telegraf/blob/master/plugins/common/shim/README.md)

{{< children hlevel="h2" >}}
diff --git a/content/telegraf/v1.24/configure_plugins/external_plugins/shim.md b/content/telegraf/v1.24/configure_plugins/external_plugins/shim.md
new file mode 100644
index 000000000..679971391
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/external_plugins/shim.md
@@ -0,0 +1,60 @@
---
title: Use the `execd` shim
description:
menu:
  telegraf_1_24:
    name: Use the `execd` shim
    weight: 50
    parent: External plugins
---

The shim makes it easy to extract an internal input,
processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This allows anyone to build and run it as a separate app using one of the
`execd` plugins:
- [inputs.execd](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/execd)
- [processors.execd](https://github.com/influxdata/telegraf/blob/master/plugins/processors/execd)
- [outputs.execd](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/execd)

## Extract a plugin using the shim wrapper

1. Move the project to an external repo. We recommend preserving the path
   structure: for example, if your plugin was located at
   `plugins/inputs/cpu` in the Telegraf repo, move it to `plugins/inputs/cpu`
   in the new repo.
2. Copy [main.go](https://github.com/influxdata/telegraf/blob/master/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder.
   This serves as the entry point to the plugin when run as a stand-alone program.
   {{% note %}}
   The shim isn't designed to run multiple plugins at the same time, so include only one plugin per repo.
   {{% /note %}}
3. Edit the `main.go` file to import your plugin. For example, `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"`. See an example of where to edit `main.go` [here](https://github.com/influxdata/telegraf/blob/7de9c5ff279e10edf7fe3fdd596f3b33902c912b/plugins/common/shim/example/cmd/main.go#L9).
4. Add a [plugin.conf](https://github.com/influxdata/telegraf/blob/master/plugins/common/shim/example/cmd/plugin.conf) for configuration
   specific to your plugin.
   {{% note %}}
   This config file must be separate from the rest of the config for Telegraf, and must not be in a shared directory with other Telegraf configs.
   {{% /note %}}

## Test and run your plugin

1. Build `cmd/main.go` using the following command with your plugin name: `go build -o plugin-name cmd/main.go`
2. Test the binary:
   - If you're building a processor or output, first feed valid metrics in on `STDIN`. Skip this step if you're building an input.
   - Run the binary (for example, `./project-name -config plugin.conf`).
     Metrics will be written to `STDOUT`. You might need to press Enter or wait for your poll duration to elapse to see data.
   - Press `Ctrl-C` to end your test.
3. Configure Telegraf to call your new plugin binary.
For an input, this looks something like:

```toml
[[inputs.execd]]
  command = ["/path/to/rand", "-config", "/path/to/plugin.conf"]
  signal = "none"
```

Refer to the `execd` plugin documentation for more information.

## Publish your plugin

Publish your plugin to GitHub and open a pull request
back to the Telegraf repo letting us know about the availability of your
[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md).
diff --git a/content/telegraf/v1.24/configure_plugins/external_plugins/write_external_plugin.md b/content/telegraf/v1.24/configure_plugins/external_plugins/write_external_plugin.md
new file mode 100644
index 000000000..f660a3d86
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/external_plugins/write_external_plugin.md
@@ -0,0 +1,31 @@
---
title: Write an external plugin
description:
menu:
  telegraf_1_24:
    name: Write an external plugin
    weight: 50
    parent: External plugins
---
Set up your plugin to use it with `execd`.

{{% note %}}
For listed [external plugins](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md), the author of the external plugin is responsible for its maintenance
and feature development.
{{% /note %}}

1. Write your Telegraf plugin. Follow InfluxData's best practices:
   - [Input plugins](https://github.com/influxdata/telegraf/blob/master/docs/INPUTS.md)
   - [Processor plugins](https://github.com/influxdata/telegraf/blob/master/docs/PROCESSORS.md)
   - [Aggregator plugins](https://github.com/influxdata/telegraf/blob/master/docs/AGGREGATORS.md)
   - [Output plugins](https://github.com/influxdata/telegraf/blob/master/docs/OUTPUTS.md)
2. If your plugin is written in Go, follow the steps for the [`execd` Go shim](/{{< latest "telegraf" >}}/configure_plugins/external_plugins/shim).
3. Add usage and development instructions on the homepage of your repository for running your plugin with its respective `execd` plugin. Refer to [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) for examples.
Include the following steps:
   - How to download the release package for your platform or how to clone the binary for your external plugin
   - Commands to build your binary
   - Location to edit your `telegraf.conf`
   - Configuration to run your external plugin with [inputs.execd](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/execd),
     [processors.execd](https://github.com/influxdata/telegraf/blob/master/plugins/processors/execd), or [outputs.execd](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/execd)
4. Submit your plugin by opening a PR to add your external plugin to the [EXTERNAL_PLUGINS.md](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md) list. Include the plugin name, a link to the plugin repository, and a short description of the plugin.
diff --git a/content/telegraf/v1.24/configure_plugins/input_plugins/_index.md b/content/telegraf/v1.24/configure_plugins/input_plugins/_index.md
new file mode 100644
index 000000000..0d244ecf7
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/input_plugins/_index.md
@@ -0,0 +1,14 @@
---
title: Collect data with input plugins
description: |
  Collect data from a variety of sources with Telegraf input plugins.
menu:
  telegraf_1_24:
    name: Input plugins
    weight: 10
    parent: Configure plugins
---
For a complete list of input plugins and links to their detailed configuration options, see [input plugins](/{{< latest "telegraf" >}}/plugins/inputs/).

In addition to plugin-specific data formats, Telegraf supports a set of [common data formats](/{{< latest "telegraf" >}}/data_formats/input/) available when configuring many of the Telegraf input plugins.
diff --git a/content/telegraf/v1.24/configure_plugins/input_plugins/using_http.md b/content/telegraf/v1.24/configure_plugins/input_plugins/using_http.md
new file mode 100644
index 000000000..0cb71bba3
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/input_plugins/using_http.md
@@ -0,0 +1,106 @@
---
title: Using the HTTP input plugin with Citi Bike data
description: Collect live metrics on Citi Bike stations in New York City with the HTTP input plugin.
menu:
  telegraf_1_24:
    name: Using the HTTP plugin
    weight: 30
    parent: Input plugins
---

This example walks through using the Telegraf HTTP input plugin to collect live metrics on Citi Bike stations in New York City. Live station data is available in JSON format directly from [Citi Bike](https://ride.citibikenyc.com/system-data).

For the following example to work, configure the [`influxdb_v2` output plugin](/telegraf/v1.24/plugins/#output-influxdb_v2). This plugin is what allows Telegraf to write the metrics to InfluxDB.

## Configure the HTTP input plugin in your Telegraf configuration file

To retrieve data from the Citi Bike URL endpoint, enable the `inputs.http` input plugin in your Telegraf configuration file.

Specify the following options:

### `urls`
One or more URLs to read metrics from. For this example, use `https://gbfs.citibikenyc.com/gbfs/en/station_status.json`.

### `data_format`
The format of the data in the HTTP endpoints that Telegraf will ingest. For this example, use JSON.

## Add parser information to your Telegraf configuration

Specify the following JSON-specific options. In this example, we use the objects subtable to gather
data from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp).

### JSON

#### `path`
To parse a JSON object, set the `path` option with a [GJSON](https://github.com/tidwall/gjson) path. The result of the query should contain a JSON object or an array of objects. The [GJSON playground](https://gjson.dev/) is a helpful tool for checking your query.

#### `tags`
List of one or more JSON keys that should be added as tags. For this example, use the tag key `station_id`.

#### `timestamp_key`
Key from the JSON file that creates the timestamp metric. In this case, we want the time that station data was last reported, or the `last_reported` key. If you don't specify a key, the time that Telegraf reads the data becomes the timestamp.

#### `timestamp_format`
The format used to interpret the designated `timestamp_key`. The `last_reported` time in this example is reported in Unix format.
#### Example configuration

```toml
[[inputs.http]]
  # URL for NYC's Citi Bike station data in JSON format
  urls = ["https://gbfs.citibikenyc.com/gbfs/en/station_status.json"]

  # Overwrite measurement name from default `http` to `citibikenyc`
  name_override = "citibike"

  # Exclude url and host items from tags
  tagexclude = ["url", "host"]

  # Data from HTTP in JSON format
  data_format = "json_v2"

  # Add a subtable to use the `json_v2` parser
  [[inputs.http.json_v2]]

    # Add an object subtable to parse a JSON object
    [[inputs.http.json_v2.object]]

      # Parse data in `data.stations` path only
      path = "data.stations"

      # Set station metadata as tags
      tags = ["station_id"]

      # Latest station information reported at `last_reported`
      timestamp_key = "last_reported"

      # Time is reported in unix timestamp format
      timestamp_format = "unix"
```

## Start Telegraf and verify data appears

[Start the Telegraf service](/telegraf/v1.24/get_started/#start-telegraf).

To test that the data is being sent to InfluxDB, run the following (replacing `telegraf.conf` with the path to your configuration file):

```
telegraf -config ~/telegraf.conf -test
```

This command should return line protocol that looks similar to the following:

```
citibike,station_id=4703 eightd_has_available_keys=false,is_installed=1,is_renting=1,is_returning=1,legacy_id="4703",num_bikes_available=6,num_bikes_disabled=2,num_docks_available=26,num_docks_disabled=0,num_ebikes_available=0,station_status="active" 1641505084000000000
citibike,station_id=4704 eightd_has_available_keys=false,is_installed=1,is_renting=1,is_returning=1,legacy_id="4704",num_bikes_available=10,num_bikes_disabled=2,num_docks_available=36,num_docks_disabled=0,num_ebikes_available=0,station_status="active" 1641505084000000000
citibike,station_id=4711 eightd_has_available_keys=false,is_installed=1,is_renting=1,is_returning=1,legacy_id="4711",num_bikes_available=9,num_bikes_disabled=0,num_docks_available=36,num_docks_disabled=0,num_ebikes_available=1,station_status="active" 1641505084000000000
```

Now you can explore and query the Citi Bike data in InfluxDB. The example below is a Flux query and visualization showing the number of available bikes over the past 15 minutes.

![Citi Bike visualization](/img/telegraf/new-citibike_query.png)
diff --git a/content/telegraf/v1.24/configure_plugins/output_plugins/_index.md b/content/telegraf/v1.24/configure_plugins/output_plugins/_index.md
new file mode 100644
index 000000000..c0d9b4bcc
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/output_plugins/_index.md
@@ -0,0 +1,16 @@
---
title: Write data with output plugins
description: |
  Output plugins define where Telegraf will deliver the collected metrics.
menu:
  telegraf_1_24:
    name: Output plugins
    weight: 20
    parent: Configure plugins
---
Output plugins define where Telegraf delivers the collected metrics. Send metrics to InfluxDB or to a variety of other datastores, services, and message queues, including Graphite, OpenTSDB, Datadog, Librato, Kafka, MQTT, and NSQ.

For a complete list of output plugins and links to their detailed configuration options, see [output plugins](/{{< latest "telegraf" >}}/plugins/outputs/).

In addition to plugin-specific data formats, Telegraf supports a set of [common data formats](/{{< latest "telegraf" >}}/data_formats/output/) available when configuring many of the Telegraf output plugins.
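For example, a minimal sketch of a configuration that writes the same metrics to two outputs at once, assuming a local InfluxDB 2.x instance (the token, organization, and bucket values below are placeholders):

```toml
# Write metrics to stdout for quick inspection
[[outputs.file]]
  files = ["stdout"]

# Write the same metrics to InfluxDB 2.x
[[outputs.influxdb_v2]]
  urls = ["http://localhost:8086"]
  token = "$INFLUX_TOKEN"      # placeholder: read from an environment variable
  organization = "example-org" # placeholder
  bucket = "example-bucket"    # placeholder
```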
diff --git a/content/telegraf/v1.24/configure_plugins/troubleshoot.md b/content/telegraf/v1.24/configure_plugins/troubleshoot.md
new file mode 100644
index 000000000..b024641a0
--- /dev/null
+++ b/content/telegraf/v1.24/configure_plugins/troubleshoot.md
@@ -0,0 +1,51 @@
---
title: Troubleshoot Telegraf
description: Resolve common issues with Telegraf.
menu:
  telegraf_1_24:
    name: Troubleshoot
    parent: Configure plugins
    weight: 79
---

## Validate your Telegraf configuration with `--test`

Run a single Telegraf collection, outputting metrics to stdout:
`telegraf --config telegraf.conf --test`

## Use the `--once` option to single-shot execute

Once tested, run `telegraf --config telegraf.conf --once` to perform a single-shot execution of all configured plugins. This sends output to the destinations specified in `telegraf.conf` rather than writing to stdout.

## Add `outputs.file` to write to a file or stdout

The following step might be helpful if:
- You're encountering issues in your output and trying to determine if it's an issue with your configuration or connection.
- `--test` outputs metrics to stdout as expected and your input, parsers, processors, and aggregators are configured correctly. Note that for listener plugins, `--test` doesn't output any metrics right away.

Add the `file` output plugin with the metrics reporting to stdout or to a file.
```toml
[[outputs.file]]
  files = ["stdout"]
```

## Set `debug = true` in your settings

When you set `debug = true` in global settings, Telegraf runs with debug log messages:

```
2021-06-28T19:18:00Z I! Starting Telegraf 1.19.0
2021-06-28T19:18:00Z I! Loaded inputs: cpu disk diskio mem net processes swap system
2021-06-28T19:18:00Z I! Loaded aggregators:
2021-06-28T19:18:00Z I! Loaded processors:
2021-06-28T19:18:00Z I! Loaded outputs: influxdb_v2
2021-06-28T19:18:00Z I! Tags enabled: host=MBP15-INFLUX.local
2021-06-28T19:18:00Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:"MBP15-INFLUX.local", Flush Interval:30s
2021-06-28T19:18:00Z D! [agent] Initializing plugins
2021-06-28T19:18:00Z D! [agent] Connecting outputs
2021-06-28T19:18:00Z D! [agent] Attempting connection to [outputs.influxdb_v2]
2021-06-28T19:18:00Z D! [agent] Successfully connected to outputs.influxdb_v2
2021-06-28T19:18:00Z D! [agent] Starting service inputs
```
diff --git a/content/telegraf/v1.24/contribute.md b/content/telegraf/v1.24/contribute.md
new file mode 100644
index 000000000..f62ea0d54
--- /dev/null
+++ b/content/telegraf/v1.24/contribute.md
@@ -0,0 +1,26 @@
---
title: Contribute to Telegraf
description:
menu:
  telegraf_1_24_ref:
    name: Contribute to Telegraf
    weight: 80
---

To contribute to the Telegraf project, complete the following steps:

1. [Sign the InfluxData Contributor License Agreement (CLA)](#sign-influxdata-contributor-license-agreement-cla).
2. [Review contribution guidelines](#review-contribution-guidelines).
3. [Review the Telegraf open source license](#review-open-source-license).

## Sign InfluxData Contributor License Agreement (CLA)

Before contributing to the Telegraf project, you must complete and sign the [InfluxData Contributor License Agreement (CLA)](https://www.influxdata.com/legal/cla/), available on the InfluxData website.
## Review contribution guidelines

To learn how you can contribute to the Telegraf project, see our [Contributing guidelines](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) in the GitHub repository.

## Review open source license

See information about our [open source MIT license for Telegraf](https://github.com/influxdata/telegraf/blob/master/LICENSE) in GitHub.
diff --git a/content/telegraf/v1.24/data_formats/_index.md b/content/telegraf/v1.24/data_formats/_index.md
new file mode 100644
index 000000000..d1b2abda4
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/_index.md
@@ -0,0 +1,16 @@
---
title: Telegraf data formats
description: Telegraf supports input data formats and output data formats for converting input and output data.
menu:
  telegraf_1_24_ref:
    name: Data formats
    weight: 50
---

This section covers the input and output data formats used in Telegraf, the plugin-driven server agent component of the InfluxData time series platform.

{{< children hlevel="h2" >}}
diff --git a/content/telegraf/v1.24/data_formats/input/_index.md b/content/telegraf/v1.24/data_formats/input/_index.md
new file mode 100644
index 000000000..47e10adad
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/_index.md
@@ -0,0 +1,37 @@
---
title: Telegraf input data formats
description: Telegraf supports parsing input data formats into Telegraf metrics.
menu:
  telegraf_1_24_ref:
    name: Input data formats
    weight: 1
    parent: Data formats
---

Telegraf contains many general-purpose plugins that support parsing input data
using a configurable parser into [metrics][]. This allows, for example, the
`kafka_consumer` input plugin to process messages in either InfluxDB line
protocol or JSON format. Telegraf supports the following input data formats:

{{< children >}}

Any input plugin containing the `data_format` option can use it to select the
desired parser:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json_v2"
```

[metrics]: /telegraf/v1.24/metrics/
diff --git a/content/telegraf/v1.24/data_formats/input/collectd.md b/content/telegraf/v1.24/data_formats/input/collectd.md
new file mode 100644
index 000000000..32a2d9338
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/collectd.md
@@ -0,0 +1,49 @@
---
title: Collectd input data format
description: Use the `collectd` input data format to parse the collectd network binary protocol to create tags for host, instance, type, and type instance.
menu:
  telegraf_1_24_ref:
    name: collectd
    weight: 10
    parent: Input data formats
---

The collectd input data format parses the collectd network binary protocol to create tags for host, instance, type, and type instance. All collectd values are added as float64 fields.

For more information, see [binary protocol](https://collectd.org/wiki/index.php/Binary_protocol) in the collectd Wiki.

You can control the cryptographic settings with parser options.
Create an authentication file and set `collectd_auth_file` to the path of the file, then set the desired security level in `collectd_security_level`.
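As a minimal sketch, an authentication file contains one `username: password` pair per line (the username and password below are placeholders):

```
# /etc/collectd/auth_file -- each line maps a username to its shared secret
telegraf: mysecretpassword
```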
For more information, including client setup, see
[Cryptographic setup](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup) in the collectd Wiki.

You can also change the path to the typesdb or add additional typesdb using
`collectd_typesdb`.

## Configuration

```toml
[[inputs.file]]
  files = ["example"]

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "collectd"

  ## Authentication file for cryptographic security levels
  collectd_auth_file = "/etc/collectd/auth_file"
  ## One of none (default), sign, or encrypt
  collectd_security_level = "encrypt"
  ## Path to TypesDB specifications
  collectd_typesdb = ["/usr/share/collectd/types.db"]

  ## Multi-value plugins can be handled two ways.
  ## "split" will parse and store the multi-value plugin data into separate measurements
  ## "join" will parse and store the multi-value plugin as a single multi-value measurement.
  ## "split" is the default behavior for backward compatibility with previous versions of influxdb.
  collectd_parse_multivalue = "split"
```
diff --git a/content/telegraf/v1.24/data_formats/input/csv.md b/content/telegraf/v1.24/data_formats/input/csv.md
new file mode 100644
index 000000000..ae6c913da
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/csv.md
@@ -0,0 +1,112 @@
---
title: CSV input data format
description: Use the `csv` input data format to parse a document containing comma-separated values into Telegraf metrics.
menu:
  telegraf_1_24_ref:
    name: CSV
    weight: 20
    parent: Input data formats
---

The CSV input data format parses documents containing comma-separated values into Telegraf metrics.

## Configuration

```toml
[[inputs.file]]
  files = ["example"]

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "csv"

  ## Indicates how many rows to treat as a header. By default, the parser assumes
  ## there is no header and will parse the first row as data. If set to anything more
  ## than 1, column names will be concatenated with the name listed in the next header row.
  ## If `csv_column_names` is specified, the column names in header will be overridden.
  csv_header_row_count = 0

  ## For assigning custom names to columns
  ## If this is specified, all columns should have a name
  ## Unnamed columns will be ignored by the parser.
  ## If `csv_header_row_count` is set to 0, this config must be used
  csv_column_names = []

  ## Indicates the number of rows to skip before looking for header information.
  csv_skip_rows = 0

  ## Indicates the number of columns to skip before looking for data to parse.
  ## These columns will be skipped in the header as well.
  csv_skip_columns = 0

  ## The separator between csv fields
  ## By default, the parser assumes a comma (",")
  csv_delimiter = ","

  ## The character reserved for marking a row as a comment row
  ## Commented rows are skipped and not parsed
  csv_comment = ""

  ## If set to true, the parser will remove leading whitespace from fields
  ## By default, this is false
  csv_trim_space = false

  ## Columns listed here will be added as tags. Any other columns
  ## will be added as fields.
  csv_tag_columns = []

  ## The column to extract the name of the metric from
  csv_measurement_column = ""

  ## The column to extract time information for the metric
  ## `csv_timestamp_format` must be specified if this is used
  csv_timestamp_column = ""

  ## The format of time data extracted from `csv_timestamp_column`
  ## this must be specified if `csv_timestamp_column` is specified
  csv_timestamp_format = ""
```

### csv_timestamp_column, csv_timestamp_format

By default, the current time is used for all created metrics. To set the
time from the parsed document, use the `csv_timestamp_column` and
`csv_timestamp_format` options together to set the time to a value in the parsed
document.

The `csv_timestamp_column` option specifies the column name containing the
time value and `csv_timestamp_format` must be set to a Go "reference time"
which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`.

Consult the Go [time](https://golang.org/pkg/time/#Parse) package for details and additional examples
on how to set the time format.

## Metrics

One metric is created for each row with the columns added as fields. The type
of the field is automatically determined based on the contents of the value.

## Examples

Config:
```
[[inputs.file]]
  files = ["example"]
  data_format = "csv"
  csv_header_row_count = 1
  csv_timestamp_column = "time"
  csv_timestamp_format = "2006-01-02T15:04:05Z07:00"
```

Input:
```
measurement,cpu,time_user,time_system,time_idle,time
cpu,cpu0,42,42,42,2018-09-13T13:03:28Z
```

Output:
```
cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000
```
diff --git a/content/telegraf/v1.24/data_formats/input/dropwizard.md b/content/telegraf/v1.24/data_formats/input/dropwizard.md
new file mode 100644
index 000000000..e684d55ab
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/dropwizard.md
@@ -0,0 +1,320 @@
---
title: Dropwizard input data format
description: Use the `dropwizard` input data format to parse Dropwizard JSON representations into Telegraf metrics.
aliases:
  - /telegraf/v1.24/data_formats/template-patterns/
menu:
  telegraf_1_24_ref:
    name: Dropwizard
    weight: 30
    parent: Input data formats
---

The `dropwizard` data format can parse a [Dropwizard JSON representation](http://metrics.dropwizard.io/3.1.0/manual/json/) of a single metrics registry. By default, tags are parsed from metric names as if they were actual InfluxDB line protocol keys (`measurement<,tag_set>`), which can be overridden using custom [template patterns](#templates). All field value types are supported, including `string`, `number`, and `boolean`.

## Configuration

```toml
[[inputs.file]]
  files = ["example"]

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "dropwizard"

  ## Used by the templating engine to join matched values when cardinality is > 1
  separator = "_"

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support the following formats:
  ## 1. filter + template
  ## 2. filter + template + extra tag(s)
filter + template + extra tag(s) + ## 3. filter + template with field key + ## 4. default template + ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>) + templates = [] + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the metric registry within the JSON document + # dropwizard_metric_registry_path = "metrics" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the default time of the measurements within the JSON document + # dropwizard_time_path = "time" + # dropwizard_time_format = "2006-01-02T15:04:05Z07:00" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the tags map within the JSON document + # dropwizard_tags_path = "tags" + + ## You may even use tag paths per tag + # [inputs.exec.dropwizard_tag_paths] + # tag1 = "tags.tag1" + # tag2 = "tags.tag2" +``` + + +## Examples + +A typical JSON of a dropwizard metric registry: + +```json +{ + "version": "3.0.0", + "counters" : { + "measurement,tag1=green" : { + "count" : 1 + } + }, + "meters" : { + "measurement" : { + "count" : 1, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "units" : "events/second" + } + }, + "gauges" : { + "measurement" : { + "value" : 1 + } + }, + "histograms" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0 + } + }, + "timers" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "duration_units" : "seconds", + "rate_units" : "calls/second" + } + } +} +``` + +Would get translated into 4 different measurements: + +``` +measurement,metric_type=counter,tag1=green count=1 +measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +measurement,metric_type=gauge value=1 +measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0 +measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +``` + +You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. +Eg. to parse the following JSON document: + +```json +{ + "time" : "2017-02-22T14:33:03.662+02:00", + "tags" : { + "tag1" : "green", + "tag2" : "yellow" + }, + "metrics" : { + "counters" : { + "measurement" : { + "count" : 1 + } + }, + "meters" : {}, + "gauges" : {}, + "histograms" : {}, + "timers" : {} + } +} +``` +and translate it into: + +``` +measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000 +``` + +you simply need to use the following additional configuration properties: + +```toml +dropwizard_metric_registry_path = "metrics" +dropwizard_time_path = "time" +dropwizard_time_format = "2006-01-02T15:04:05Z07:00" +dropwizard_tags_path = "tags" +## tag paths per tag are supported too, eg. 
+#[inputs.yourinput.dropwizard_tag_paths] +# tag1 = "tags.tag1" +# tag2 = "tags.tag2" +``` + +## Templates + +Template patterns are a mini language that describes how a dot-delimited +string should be mapped to and from [metrics](/telegraf/v1.23/concepts/metrics/). + +A template has the following format: +``` +"host.mytag.mytag.measurement.measurement.field*" +``` + +You can set the following keywords: + +- `measurement`: Specifies that this section of the graphite bucket corresponds +to the measurement name. This can be specified multiple times. +- `field`: Specifies that this section of the graphite bucket corresponds +to the field name. This can be specified multiple times. +- `measurement*`: Specifies that all remaining elements of the graphite bucket +correspond to the measurement name. +- `field*`: Specifies that all remaining elements of the graphite bucket +correspond to the field name. + +{{% note %}} +`field*` can't be used in conjunction with `measurement*`. +{{% /note %}} + +Any part of the template that isn't a keyword is treated as a tag key, which can also be used multiple times. + +### Examples + +#### Measurement and tag templates + +The most basic template is to specify a single transformation to apply to all +incoming metrics. + +##### Template + +```toml +templates = [ + "region.region.measurement*" +] +``` + +##### Resulting transformation + +``` +us.west.cpu.load 100 +=> cpu.load,region=us.west value=100 +``` + +You can also specify multiple templates using [filters](#filter-templates). + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + +#### Field templates + +The field keyword tells Telegraf to give the metric that field name. + +##### Template + +```toml +separator = "_" +templates = [ + "measurement.measurement.field.field.region" +] +``` + +##### Resulting transformation + +``` +cpu.usage.idle.percent.eu-east 100 +=> cpu_usage,region=eu-east idle_percent=100 +``` + +You can also derive the field key from all remaining elements of the graphite +bucket by specifying `field*`. + +##### Template + +```toml +separator = "_" +templates = [ + "measurement.measurement.region.field*" +] +``` + +##### Resulting transformation + +``` +cpu.usage.eu-east.idle.percentage 100 +=> cpu_usage,region=eu-east idle_percentage=100 +``` + +#### Filter templates + +You can also filter templates based on the name of the bucket +using a wildcard. + +##### Template + +```toml +templates = [ + "cpu.* measurement.measurement.region", + "mem.* measurement.measurement.host" +] +``` + +##### Resulting transformation + +``` +cpu.load.eu-east 100 +=> cpu_load,region=eu-east value=100 + +mem.cached.localhost 256 +=> mem_cached,host=localhost value=256 +``` + +#### Adding tags + +You can add additional tags to a metric that don't exist on the received metric by specifying them after the pattern. Tags have the same format as the line protocol. +Separate multiple tags with commas. 
+
+##### Template
+
+```toml
+templates = [
+    "measurement.measurement.field.region datacenter=1a"
+]
+```
+
+##### Resulting transformation
+
+```
+cpu.usage.idle.eu-east 100
+=> cpu_usage,region=eu-east,datacenter=1a idle=100
+```
diff --git a/content/telegraf/v1.24/data_formats/input/graphite.md b/content/telegraf/v1.24/data_formats/input/graphite.md
new file mode 100644
index 000000000..1e7cd3777
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/graphite.md
@@ -0,0 +1,195 @@
+---
+title: Graphite input data format
+description: Use the Graphite data format to translate Graphite dot buckets directly into Telegraf measurement names, with a single value field, and without any tags.
+menu:
+  telegraf_1_24_ref:
+
+    name: Graphite
+    weight: 40
+    parent: Input data formats
+  aliases:
+    - /telegraf/v1.24/data_formats/template-patterns/
+---
+
+The Graphite data format translates Graphite *dot* buckets directly into
+Telegraf measurement names, with a single value field, and without any tags.
+By default, the separator is left as `.`, but this can be changed using the
+`separator` option. For more advanced options, Telegraf supports specifying
+[templates](#templates) to translate graphite buckets into Telegraf metrics.
+
+## Configuration
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "graphite"
+
+  ## This string will be used to join the matched values.
+  separator = "_"
+
+  ## Each template line requires a template pattern. It can have an optional
+  ## filter before the template and separated by spaces. It can also have optional extra
+  ## tags following the template. Multiple tags should be separated by commas and no spaces
+  ## similar to the line protocol format. There can be only one default template.
+  ## Templates support the following formats:
+  ## 1. filter + template
+  ## 2. filter + template + extra tag(s)
+  ## 3. filter + template with field key
+  ## 4. default template
+  templates = [
+    "*.app env.service.resource.measurement",
+    "stats.* .host.measurement* region=eu-east,agent=sensu",
+    "stats2.* .host.measurement.field",
+    "measurement*"
+  ]
+```
+
+## Templates
+
+Template patterns are a mini language that describes how a dot-delimited
+string should be mapped to and from [metrics](/telegraf/v1.24/metrics/).
+
+A template has the following format:
+```
+"host.mytag.mytag.measurement.measurement.field*"
+```
+
+You can set the following keywords:
+
+- `measurement`: Specifies that this section of the graphite bucket corresponds
+to the measurement name. This can be specified multiple times.
+- `field`: Specifies that this section of the graphite bucket corresponds
+to the field name. This can be specified multiple times.
+- `measurement*`: Specifies that all remaining elements of the graphite bucket
+correspond to the measurement name.
+- `field*`: Specifies that all remaining elements of the graphite bucket
+correspond to the field name.
+
+{{% note %}}
+`field*` can't be used in conjunction with `measurement*`.
+{{% /note %}}
+
+Any part of the template that isn't a keyword is treated as a tag key, which can also be used multiple times.
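+
+For instance, a single template can name several different tag keys. A minimal
+sketch (the bucket and values here are hypothetical, not from the examples below):
+
+```toml
+templates = [
+    "dc.host.measurement.field"
+]
+```
+
+```
+us-east.server01.cpu.load 100
+=> cpu,dc=us-east,host=server01 load=100
+```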
+ +### Examples + +#### Measurement and tag templates + +The most basic template is to specify a single transformation to apply to all +incoming metrics. + +##### Template + +```toml +templates = [ + "region.region.measurement*" +] +``` + +##### Resulting transformation + +``` +us.west.cpu.load 100 +=> cpu.load,region=us.west value=100 +``` + +You can also specify multiple templates using [filters](#filter-templates). + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + +#### Field templates + +The field keyword tells Telegraf to give the metric that field name. + +##### Template + +```toml +separator = "_" +templates = [ + "measurement.measurement.field.field.region" +] +``` + +##### Resulting transformation + +``` +cpu.usage.idle.percent.eu-east 100 +=> cpu_usage,region=eu-east idle_percent=100 +``` + +You can also derive the field key from all remaining elements of the graphite +bucket by specifying `field*`. + +##### Template + +```toml +separator = "_" +templates = [ + "measurement.measurement.region.field*" +] +``` + +##### Resulting transformation + +``` +cpu.usage.eu-east.idle.percentage 100 +=> cpu_usage,region=eu-east idle_percentage=100 +``` + +#### Filter templates + +You can also filter templates based on the name of the bucket +using a wildcard. + +##### Template + +```toml +templates = [ + "cpu.* measurement.measurement.region", + "mem.* measurement.measurement.host" +] +``` + +##### Resulting transformation + +``` +cpu.load.eu-east 100 +=> cpu_load,region=eu-east value=100 + +mem.cached.localhost 256 +=> mem_cached,host=localhost value=256 +``` + +#### Adding tags + +You can add additional tags to a metric that don't exist on the received metric by specifying them after the pattern. Tags have the same format as the line protocol. +Separate multiple tags with commas. + +##### Template + +```toml +templates = [ + "measurement.measurement.field.region datacenter=1a" +] +``` + +##### Resulting transformation + +``` +cpu.usage.idle.eu-east 100 +=> cpu_usage,region=eu-east,datacenter=1a idle=100 +``` diff --git a/content/telegraf/v1.24/data_formats/input/grok.md b/content/telegraf/v1.24/data_formats/input/grok.md new file mode 100644 index 000000000..ff6d3817c --- /dev/null +++ b/content/telegraf/v1.24/data_formats/input/grok.md @@ -0,0 +1,227 @@ +--- +title: Grok input data format +description: Use the grok data format to parse line-delimited data using a regular expression-like language. +menu: + telegraf_1_24_ref: + + name: Grok + weight: 40 + parent: Input data formats +--- + +The grok data format parses line delimited data using a regular expression-like +language. + +If you need to become familiar with grok patterns, see [Grok Basics](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html#_grok_basics) +in the Logstash documentation. The grok parser uses a slightly modified version of logstash "grok" +patterns, using the format: + +``` +%{[:][:]} +``` + +The `capture_syntax` defines the grok pattern that is used to parse the input +line and the `semantic_name` is used to name the field or tag. The extension +`modifier` controls the data type that the parsed item is converted to or +other special handling. + +By default, all named captures are converted into string fields. +Timestamp modifiers can be used to convert captures to the timestamp of the +parsed metric. 
If no timestamp is parsed, the metric is created using the
+current time.
+
+You must capture at least one field per line.
+
+- Available modifiers:
+  - string (default if nothing is specified)
+  - int
+  - float
+  - duration (i.e., 5.23ms gets converted to int nanoseconds)
+  - tag (converts the field into a tag)
+  - drop (drops the field completely)
+  - measurement (use the matched text as the measurement name)
+- Timestamp modifiers:
+  - ts (This will auto-learn the timestamp format)
+  - ts-ansic ("Mon Jan _2 15:04:05 2006")
+  - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
+  - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
+  - ts-rfc822 ("02 Jan 06 15:04 MST")
+  - ts-rfc822z ("02 Jan 06 15:04 -0700")
+  - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
+  - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
+  - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
+  - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
+  - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
+  - ts-httpd ("02/Jan/2006:15:04:05 -0700")
+  - ts-epoch (seconds since unix epoch, may contain decimal)
+  - ts-epochnano (nanoseconds since unix epoch)
+  - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
+  - ts-"CUSTOM"
+
+CUSTOM time layouts must be within quotes and be the representation of the
+"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
+To match a comma decimal point, you can use a period in the pattern string.
+For example, `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`.
+See https://golang.org/pkg/time/#Parse for more details.
+
+Telegraf has many of its own [built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/grok/influx_patterns.go),
+as well as support for most of
+[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/main/patterns/ecs-v1/grok-patterns).
+_Golang regular expressions do not support lookahead or lookbehind.
+logstash patterns that depend on these are not supported._
+
+If you need help building patterns to match your logs, the
+[Grok Debugger application](https://grokdebug.herokuapp.com) might be helpful.
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  ## Files to parse each interval.
+  ## These accept standard unix glob matching rules, but with the addition of
+  ## ** as a "super asterisk". i.e.:
+  ##   /var/log/**.log     -> recursively find all .log files in /var/log
+  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
+  ##   /var/log/apache.log -> only tail the apache log file
+  files = ["/var/log/apache/access.log"]
+
+  ## The data format to be read from files
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "grok"
+
+  ## This is a list of patterns to check the given log file(s) for.
+  ## Note that adding patterns here increases processing time. The most
+  ## efficient configuration is to have one pattern.
+  ## Other common built-in patterns are:
+  ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
+  ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+
+  ## Full path(s) to custom pattern files.
+  grok_custom_pattern_files = []
+
+  ## Custom patterns can also be defined here. Put one pattern per line.
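+  ## For example, a hypothetical custom pattern for a Postfix-style queue ID
+  ## could be defined on its own line as:
+  ##   POSTFIX_QUEUEID [0-9A-F]{10,11}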
+ grok_custom_patterns = ''' + ''' + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 data one two 5.43µs + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. UTC -- or blank/unspecified, will return timestamp in UTC + grok_timezone = "Canada/Eastern" +``` + +### Timestamp examples + +This example input and config parses a file using a custom timestamp conversion: + +``` +2017-02-21 13:10:34 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] +``` + +This example input and config parses a file using a timestamp in unix time: + +``` +1466004605 value=42 +1466004605.123456789 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] +``` + +This example parses a file using a built-in conversion and a custom pattern: + +``` +Wed Apr 12 13:10:34 PST 2017 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] + grok_custom_patterns = ''' + TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} + ''' +``` + +For cases where the timestamp itself is without offset, the `timezone` config var is available +to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times +are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp +will be processed based on the current machine timezone configuration. Lastly, if using a +timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +grok will offset the timestamp accordingly. + +### TOML escaping + +When saving patterns to the configuration file, keep in mind the different TOML +[string](https://github.com/toml-lang/toml#string) types and the escaping +rules for each. These escaping rules must be applied in addition to the +escaping required by the grok syntax. Using the Multi-line line literal +syntax with `'''` may be useful. + +The following config examples will parse this input file: + +``` +|42|\uD83D\uDC2F|'telegraf'| +``` + +Since `|` is a special character in the grok language, we must escape it to +get a literal `|`. With a basic TOML string, special characters such as +backslash must be escaped, requiring us to escape the backslash a second time. + +```toml +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" +``` + +We cannot use a literal TOML string for the pattern, because we cannot match a +`'` within it. However, it works well for the custom pattern. 
+
+```toml
+[[inputs.file]]
+  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+A multi-line literal string allows us to encode the pattern:
+```toml
+[[inputs.file]]
+  grok_patterns = ['''
+    \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+  ''']
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+### Tips for creating patterns
+
+Writing complex patterns can be difficult; here is some advice for writing a
+new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
+
+Create a file output that writes to stdout, and disable other outputs while
+testing. This will allow you to see the captured metrics. Keep in mind that
+the file output will only print once per `flush_interval`.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+- Start with a file containing only a single line of your input.
+- Remove all but the first token or piece of the line.
+- Add the section of your pattern to match this piece to your configuration file.
+- Verify that the metric is parsed successfully by running Telegraf.
+- If successful, add the next token, update the pattern and retest.
+- Continue one token at a time until the entire line is successfully parsed.
diff --git a/content/telegraf/v1.24/data_formats/input/influx.md b/content/telegraf/v1.24/data_formats/input/influx.md
new file mode 100644
index 000000000..c60bffa57
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/influx.md
@@ -0,0 +1,28 @@
+---
+title: InfluxDB Line Protocol input data format
+description: Use the InfluxDB Line Protocol input data format to parse InfluxDB metrics directly into Telegraf metrics.
+menu:
+  telegraf_1_24_ref:
+
+    name: InfluxDB Line Protocol input
+    weight: 60
+    parent: Input data formats
+---
+
+There are no additional configuration options for InfluxDB [line protocol][]. The
+InfluxDB metrics are parsed directly into Telegraf metrics.
+
+[line protocol]: /{{< latest "influxdb" "v1" >}}/write_protocols/line/
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
diff --git a/content/telegraf/v1.24/data_formats/input/json.md b/content/telegraf/v1.24/data_formats/input/json.md
new file mode 100644
index 000000000..a0b071d48
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/json.md
@@ -0,0 +1,228 @@
+---
+title: JSON input data format
+description: Use the JSON input data format to parse [JSON][json] objects, or an array of objects, into Telegraf metric fields.
+menu:
+  telegraf_1_24_ref:
+
+    name: JSON input
+    weight: 70
+    parent: Input data formats
+---
+
+{{% note %}}
+The following information applies to the legacy JSON input data format. For most cases, we recommend using the [JSON v2 input data format](/{{< latest "telegraf" >}}/data_formats/input/json_v2/) instead.
+{{% /note %}}
+
+The JSON input data format parses a [JSON][json] object or an array of objects
+into Telegraf metric fields.
+
+**NOTE:** All JSON numbers are converted to float fields. JSON strings are
+ignored unless specified in the `tag_keys` or `json_string_fields` options.
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+
+  ## Query is a GJSON path that specifies a specific chunk of JSON to be
+  ## parsed, if not specified the whole document will be parsed.
+  ##
+  ## GJSON query paths are described here:
+  ##   https://github.com/tidwall/gjson#path-syntax
+  json_query = ""
+
+  ## Tag keys is an array of keys that should be added as tags.
+  tag_keys = [
+    "my_tag_1",
+    "my_tag_2"
+  ]
+
+  ## String fields is an array of keys that should be added as string fields.
+  json_string_fields = []
+
+  ## Name key is the key to use as the measurement name.
+  json_name_key = ""
+
+  ## Time key is the key containing the time that should be used to create the
+  ## metric.
+  json_time_key = ""
+
+  ## Time format is the time layout that should be used to interpret the
+  ## json_time_key. The time must be `unix`, `unix_ms` or a time in the
+  ## "reference time".
+  ##   ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006"
+  ##       json_time_format = "2006-01-02T15:04:05Z07:00"
+  ##       json_time_format = "unix"
+  ##       json_time_format = "unix_ms"
+  json_time_format = ""
+```
+
+### `json_query`
+
+The `json_query` is a [GJSON][gjson] path that can be used to limit the
+portion of the overall JSON document that should be parsed. The result of the
+query should contain a JSON object or an array of objects.
+
+Consult the GJSON [path syntax][gjson syntax] for details and examples.
+
+### json_time_key, json_time_format
+
+By default, the current time is used for all created metrics. To set the
+time from the JSON document instead, use the `json_time_key` and
+`json_time_format` options together to set the time to a value in the parsed
+document.
+
+The `json_time_key` option specifies the key containing the time value and
+`json_time_format` must be set to `unix`, `unix_ms`, or the Go "reference
+time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`.
+
+Consult the Go [time][time parse] package for details and additional examples
+on how to set the time format.
+
+## Examples
+
+### Basic parsing
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  name_override = "myjsonmetric"
+  data_format = "json"
+```
+
+Input:
+```json
+{
+    "a": 5,
+    "b": {
+        "c": 6
+    },
+    "ignored": "I'm a string"
+}
+```
+
+Output:
+```
+myjsonmetric a=5,b_c=6
+```
+
+### Name, tags, and string fields
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  json_name_key = "name"
+  tag_keys = ["my_tag_1"]
+  json_string_fields = ["my_field"]
+  data_format = "json"
+```
+
+Input:
+```json
+{
+    "a": 5,
+    "b": {
+        "c": 6,
+        "my_field": "description"
+    },
+    "my_tag_1": "foo",
+    "name": "my_json"
+}
+```
+
+Output:
+```
+my_json,my_tag_1=foo a=5,b_c=6,my_field="description"
+```
+
+### Arrays
+
+If the JSON data is an array, then each object within the array is parsed with
+the configured settings.
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  data_format = "json"
+  json_time_key = "b_time"
+  json_time_format = "02 Jan 06 15:04 MST"
+```
+
+Input:
+```json
+[
+    {
+        "a": 5,
+        "b": {
+            "c": 6,
+            "time":"04 Jan 06 15:04 MST"
+        }
+    },
+    {
+        "a": 7,
+        "b": {
+            "c": 8,
+            "time":"11 Jan 07 15:04 MST"
+        }
+    }
+]
+```
+
+Output:
+```
+file a=5,b_c=6 1136387040000000000
+file a=7,b_c=8 1168527840000000000
+```
+
+### Query
+
+The `json_query` option can be used to parse a subset of the document.
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  data_format = "json"
+  tag_keys = ["first"]
+  json_string_fields = ["last"]
+  json_query = "obj.friends"
+```
+
+Input:
+```json
+{
+    "obj": {
+        "name": {"first": "Tom", "last": "Anderson"},
+        "age": 37,
+        "children": ["Sara", "Alex", "Jack"],
+        "fav.movie": "Deer Hunter",
+        "friends": [
+            {"first": "Dale", "last": "Murphy", "age": 44},
+            {"first": "Roger", "last": "Craig", "age": 68},
+            {"first": "Jane", "last": "Murphy", "age": 47}
+        ]
+    }
+}
+```
+
+Output:
+```
+file,first=Dale last="Murphy",age=44
+file,first=Roger last="Craig",age=68
+file,first=Jane last="Murphy",age=47
+```
+
+[gjson]: https://github.com/tidwall/gjson
+[gjson syntax]: https://github.com/tidwall/gjson#path-syntax
+[json]: https://www.json.org/
+[time parse]: https://golang.org/pkg/time/#Parse
diff --git a/content/telegraf/v1.24/data_formats/input/json_v2.md b/content/telegraf/v1.24/data_formats/input/json_v2.md
new file mode 100644
index 000000000..9da1fedc3
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/json_v2.md
@@ -0,0 +1,174 @@
+---
+title: JSON v2 input data format
+description: Use the JSON v2 input data format to parse [JSON][json] objects, or an array of objects, into Telegraf metric fields.
+menu:
+  telegraf_1_24_ref:
+
+    name: JSON v2 input
+    weight: 70
+    parent: Input data formats
+---
+
+The JSON v2 input data format parses a [JSON][json] object or an array of objects into Telegraf metric fields.
+This parser takes valid JSON input and turns it into metrics.
+
+The supported query syntax is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md).
+Use [this playground](https://gjson.dev/) to test out your GJSON path.
+
+You can find multiple examples [here](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2/testdata) in the Telegraf repository.
+
+## Configuration
+
+Configure this parser by describing the metric you want: define the fields and tags from the input.
+The configuration is divided into config sub-tables called `field`, `tag`, and `object`.
+In the example below you can see all the possible configuration keys you can define for each config table.
+In the sections that follow, these configuration keys are defined in more detail.
+
+```toml
+[[inputs.file]]
+  files = []
+  data_format = "json_v2"
+
+  [[inputs.file.json_v2]]
+    measurement_name = ""       # A string that will become the new measurement name
+    measurement_name_path = ""  # A string with valid GJSON path syntax, will override measurement_name
+    timestamp_path = ""         # A string with valid GJSON path syntax to a valid timestamp (single value)
+    timestamp_format = ""       # A string with a valid timestamp format (see below for possible values)
+    timestamp_timezone = ""     # A string with a valid timezone (see below for possible values)
+
+    [[inputs.file.json_v2.field]]
+      path = ""           # A string with valid GJSON path syntax
+      rename = "new name" # A string with a new name for the field key
+      type = "int"        # A string specifying the type (int,uint,float,string,bool)
+
+    [[inputs.file.json_v2.tag]]
+      path = ""           # A string with valid GJSON path syntax
+      rename = "new name" # A string with a new name for the tag key
+
+    [[inputs.file.json_v2.object]]
+      path = ""               # A string with valid GJSON path syntax
+      timestamp_key = ""      # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp
+      timestamp_format = ""   # A string with a valid timestamp format (see below for possible values)
+      timestamp_timezone = "" # A string with a valid timezone (see below for possible values)
+      disable_prepend_keys = false # Or true; just not both
+      included_keys = []      # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result
+      excluded_keys = []      # List of JSON keys (for a nested key, prepend the parent keys with underscores) that shouldn't be included in result
+      tags = []               # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field
+      [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a new name for the tag key
+        key = "new name"
+      [inputs.file.json_v2.object.fields]  # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a type (int,uint,float,string,bool)
+        key = "int"
+```
+
+### Root configuration options
+
+* **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string.
+* **measurement_name_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a measurement name from the JSON input.
+  The query must return a single data value or it will use the default measurement name.
+  This takes precedence over `measurement_name`.
+* **timestamp_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a timestamp from the JSON input.
+  The query must return a single data value or it will default to the current time.
+* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_path is defined)**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or
+  the Go "reference time" which is defined to be the specific time:
+  `Mon Jan 2 15:04:05 MST 2006`
+* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_path)**: This option should be set to a
+  [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones),
+  such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`.
+  Defaults to `UTC`.
+
+## Arrays and Objects
+
+The following describes the high-level approach when parsing arrays and objects:
+
+- **Array**: Every element in an array is treated as a *separate* metric
+- **Object**: Every key-value in an object is treated as a *single* metric
+
+When handling nested arrays and objects, the rules above continue to apply as the parser creates metrics.
+When an object has multiple arrays as values,
+the arrays will become separate metrics containing only non-array values from the object.
+Below you can see an example of this behavior,
+with an input JSON containing an array of book objects that has a nested array of characters.
+
+**Example JSON:**
+
+```json
+{
+    "book": {
+        "title": "The Lord Of The Rings",
+        "chapters": [
+            "A Long-expected Party",
+            "The Shadow of the Past"
+        ],
+        "author": "Tolkien",
+        "characters": [
+            {
+                "name": "Bilbo",
+                "species": "hobbit"
+            },
+            {
+                "name": "Frodo",
+                "species": "hobbit"
+            }
+        ],
+        "random": [
+            1,
+            2
+        ]
+    }
+}
+```
+
+**Example configuration:**
+
+```toml
+[[inputs.file]]
+  files = ["./testdata/multiple_arrays_in_object/input.json"]
+  data_format = "json_v2"
+  [[inputs.file.json_v2]]
+    [[inputs.file.json_v2.object]]
+      path = "book"
+      tags = ["title"]
+      disable_prepend_keys = true
+```
+
+**Expected metrics:**
+
+```
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Frodo",species="hobbit"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=1
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2
+```
+
+You can find more complicated examples under the folder `testdata`.
+
+## Types
+
+For each field you have the option to define the type for each metric.
+The following rules are in place for this configuration:
+
+* If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible.
+  If the type can't be converted, then the parser will fail.
+* If a type isn't defined, the parser will use the default type defined in the JSON (int, float, string).
+
+The type values you can set:
+
+* `int`: bools, floats, or strings (with valid numbers) can be converted to an int.
+* `uint`: bools, floats, or strings (with valid numbers) can be converted to a uint.
+* `string`: any data can be formatted as a string.
+* `float`: string values (with valid numbers) or integers can be converted to a float.
+* `bool`: the string values "true" or "false" (regardless of capitalization) or the integer values `0` or `1` can be converted to a bool.
+
+[json]: https://www.json.org/
diff --git a/content/telegraf/v1.24/data_formats/input/logfmt.md b/content/telegraf/v1.24/data_formats/input/logfmt.md
new file mode 100644
index 000000000..7e240a544
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/logfmt.md
@@ -0,0 +1,43 @@
+---
+title: Logfmt input data format
+description: Use the `logfmt` input data format to parse logfmt data into Telegraf metrics.
+menu:
+  telegraf_1_24_ref:
+
+    name: logfmt
+    weight: 80
+    parent: Input data formats
+---
+
+The `logfmt` data format parses [logfmt] data into Telegraf metrics.
+
+[logfmt]: https://brandur.org/logfmt
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "logfmt"
+
+  ## Set the name of the created metric; if unset, the name of the plugin
+  ## will be used.
+  metric_name = "logfmt"
+```
+
+## Metrics
+
+Each key-value pair in the line is added to a new metric as a field. The type
+of the field is automatically determined based on the contents of the value.
+
+## Examples
+
+```
+- method=GET host=example.org ts=2018-07-24T19:43:40.275Z connect=4ms service=8ms status=200 bytes=1653
++ logfmt method="GET",host="example.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i
+```
diff --git a/content/telegraf/v1.24/data_formats/input/nagios.md b/content/telegraf/v1.24/data_formats/input/nagios.md
new file mode 100644
index 000000000..969b481c3
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/nagios.md
@@ -0,0 +1,30 @@
+---
+title: Nagios input data format
+description: Use the Nagios input data format to parse the output of Nagios plugins into Telegraf metrics.
+menu:
+  telegraf_1_24_ref:
+
+    name: Nagios
+    weight: 90
+    parent: Input data formats
+---
+
+The Nagios input data format parses the output of
+[Nagios plugins](https://www.nagios.org/downloads/nagios-plugins/) into
+Telegraf metrics.
+
+## Configuration
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "nagios"
+```
diff --git a/content/telegraf/v1.24/data_formats/input/prometheus-remote-write.md b/content/telegraf/v1.24/data_formats/input/prometheus-remote-write.md
new file mode 100644
index 000000000..b12ad55db
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/input/prometheus-remote-write.md
@@ -0,0 +1,62 @@
+---
+title: Prometheus Remote Write input data format
+description: |
+  Use the Prometheus Remote Write input data format to write samples directly into Telegraf metrics.
+menu:
+  telegraf_1_24_ref:
+
+    name: Prometheus Remote Write
+    weight: 40
+    parent: Input data formats
+---
+
+Use the Prometheus Remote Write plugin to convert [Prometheus Remote Write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) samples directly into Telegraf metrics.
+
+{{% note %}}
+If you are using InfluxDB 1.x and the [Prometheus Remote Write endpoint](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/prometheusremotewrite/README.md)
+to write metrics, you can migrate to InfluxDB 2.0 and use this parser.
+For the metrics to completely align with the 1.x endpoint, add a Starlark processor as described [here](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md).
+{{% /note %}}
+
+### Configuration
+
+Use the [`inputs.http_listener_v2`](/telegraf/v1.24/plugins/#input-http_listener_v2) plugin and set `data_format = "prometheusremotewrite"`.
+
+```toml
+[[inputs.http_listener_v2]]
+  ## Address and port to host HTTP listener on
+  service_address = ":1234"
+  ## Path to listen to.
+  path = "/receive"
+  ## Data format to consume.
+ data_format = "prometheusremotewrite" +``` + +### Example + +**Example Input** +``` +prompb.WriteRequest{ + Timeseries: []*prompb.TimeSeries{ + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "go_gc_duration_seconds"}, + {Name: "instance", Value: "localhost:9090"}, + {Name: "job", Value: "prometheus"}, + {Name: "quantile", Value: "0.99"}, + }, + Samples: []prompb.Sample{ + {Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } +``` + +**Example Output** +``` +prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 +``` + +[here]: https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite#for-alignment-with-the-influxdb-v1x-prometheus-remote-write-spec diff --git a/content/telegraf/v1.24/data_formats/input/value.md b/content/telegraf/v1.24/data_formats/input/value.md new file mode 100644 index 000000000..38b292f66 --- /dev/null +++ b/content/telegraf/v1.24/data_formats/input/value.md @@ -0,0 +1,45 @@ +--- +title: Value input data format +description: Use the `value` input data format to parse single values into Telegraf metrics. +menu: + telegraf_1_24_ref: + + name: Value + weight: 100 + parent: Input data formats +--- + + +The "value" input data format translates single values into Telegraf metrics. This +is done by assigning a measurement name and setting a single field ("value") +as the parsed metric. + +## Configuration + +You **must** tell Telegraf what type of metric to collect by using the +`data_type` configuration option. Available data type options are: + +1. integer +2. float or long +3. string +4. boolean + +> **Note:** It is also recommended that you set `name_override` to a measurement +name that makes sense for your metric; otherwise, it will just be set to the +name of the plugin. + +```toml +[[inputs.exec]] + ## Commands array + commands = ["cat /proc/sys/kernel/random/entropy_avail"] + + ## override the default metric name of "exec" + name_override = "entropy_available" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "integer" # required +``` diff --git a/content/telegraf/v1.24/data_formats/input/wavefront.md b/content/telegraf/v1.24/data_formats/input/wavefront.md new file mode 100644 index 000000000..615278ebc --- /dev/null +++ b/content/telegraf/v1.24/data_formats/input/wavefront.md @@ -0,0 +1,29 @@ +--- +title: Wavefront input data format +description: Use the Wavefront input data format to parse Wavefront data into Telegraf metrics. +menu: + telegraf_1_24_ref: + + name: Wavefront + weight: 110 + parent: Input data formats +--- + +The Wavefront input data format parses Wavefront data into Telegraf metrics. +For more information on the Wavefront native data format, see +[Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html) in the Wavefront documentation. + +## Configuration + +There are no additional configuration options for Wavefront Data Format line-protocol. + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "wavefront" +``` diff --git a/content/telegraf/v1.24/data_formats/input/xml.md b/content/telegraf/v1.24/data_formats/input/xml.md new file mode 100644 index 000000000..ee67b4d24 --- /dev/null +++ b/content/telegraf/v1.24/data_formats/input/xml.md @@ -0,0 +1,59 @@ +--- +title: XML input data format +description: Use the XML input data format to parse XML data into Telegraf metrics. +menu: + telegraf_1_24_ref: + + name: XML + weight: 110 + parent: Input data formats +--- + +The XML input data format parses XML data into Telegraf metrics. + + +## Configuration + +```toml +[[inputs.file]] + files = ["example.xml"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "xml" + + ## Multiple parsing sections are allowed + [[inputs.file.xml]] + ## Optional: XPath-query to select a subset of nodes from the XML document. + #metric_selection = "/Bus/child::Sensor" + + ## Optional: XPath-query to set the metric (measurement) name. + #metric_name = "string('example')" + + ## Optional: Query to extract metric timestamp. + ## If not specified the time of execution is used. + #timestamp = "/Gateway/Timestamp" + ## Optional: Format of the timestamp determined by the query above. + ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang + ## time format. If not specified, a "unix" timestamp (in seconds) is expected. + #timestamp_format = "2006-01-02T15:04:05Z" + + ## Tag definitions using the given XPath queries. + [inputs.file.xml.tags] + name = "substring-after(Sensor/@name, ' ')" + device = "string('the ultimate sensor')" + + ## Integer field definitions using XPath queries. + [inputs.file.xml.fields_int] + consumers = "Variable/@consumers" + + ## Non-integer field definitions using XPath queries. + ## The field type is defined using XPath expressions such as number(), boolean() or string(). If no conversion is performed the field will be of type string. + [inputs.file.xml.fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'ok'" +``` diff --git a/content/telegraf/v1.24/data_formats/output/_index.md b/content/telegraf/v1.24/data_formats/output/_index.md new file mode 100644 index 000000000..3c676c7cb --- /dev/null +++ b/content/telegraf/v1.24/data_formats/output/_index.md @@ -0,0 +1,31 @@ +--- +title: Telegraf output data formats +description: Telegraf serializes metrics into output data formats. +menu: + telegraf_1_24_ref: + + name: Output data formats + weight: 1 + parent: Data formats +--- + +In addition to output-specific data formats, Telegraf supports the following set +of common data formats that may be selected when configuring many of the Telegraf +output plugins. + +{{< children >}} + +You will be able to identify the plugins with support by the presence of a +`data_format` configuration option, for example, in the File (`file`) output plugin: + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout"] + + ## Data format to output. 
+
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+```
diff --git a/content/telegraf/v1.24/data_formats/output/carbon2.md b/content/telegraf/v1.24/data_formats/output/carbon2.md
new file mode 100644
index 000000000..1c9573ea9
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/output/carbon2.md
@@ -0,0 +1,61 @@
+---
+title: Carbon2 output data format
+description: Use the Carbon2 output data format (serializer) to convert Telegraf metrics into the Carbon2 format.
+menu:
+  telegraf_1_24_ref:
+
+    name: Carbon2
+    weight: 10
+    parent: Output data formats
+---
+
+The `carbon2` output data format (serializer) translates the Telegraf metric format to the [Carbon2 format](http://metrics20.org/implementations/).
+
+### Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "carbon2"
+```
+
+Standard form:
+
+```
+metric=name field=field_1 host=foo 30 1234567890
+metric=name field=field_2 host=foo 4 1234567890
+metric=name field=field_N host=foo 59 1234567890
+```
+
+### Metrics
+
+The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics. There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field.
+
+### Example
+
+If we take the following InfluxDB Line Protocol:
+
+```
+weather,location=us-midwest,season=summer temperature=82,wind=100 1234567890
+```
+
+After serializing in Carbon2, the result would be:
+
+```
+metric=weather field=temperature location=us-midwest season=summer 82 1234567890
+metric=weather field=wind location=us-midwest season=summer 100 1234567890
+```
+
+### Fields and tags with spaces
+
+When a field key or tag key-value has spaces, spaces will be replaced with `_`.
+
+### Tags with empty values
+
+When a tag's value is empty, it will be replaced with `null`.
diff --git a/content/telegraf/v1.24/data_formats/output/graphite.md b/content/telegraf/v1.24/data_formats/output/graphite.md
new file mode 100644
index 000000000..ce1421dbf
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/output/graphite.md
@@ -0,0 +1,59 @@
+---
+title: Graphite output data format
+description: Use the Graphite output data format to serialize data from Telegraf metrics.
+menu:
+  telegraf_1_24_ref:
+
+    name: Graphite output
+    weight: 20
+    parent: Output data formats
+---
+
+The Graphite data format is serialized from Telegraf metrics using either the
+template pattern or tag support method. You can select between the two
+methods using the [`graphite_tag_support`](#graphite_tag_support) option. When set, the tag support method is used,
+otherwise the [template pattern](#templates) option is used.
+
+## Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "graphite"
+
+  ## Prefix added to each graphite bucket
+  prefix = "telegraf"
+  ## Graphite template pattern
+  template = "host.tags.measurement.field"
+
+  ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
+  # graphite_tag_support = false
+```
+
+### graphite_tag_support
+
+When the `graphite_tag_support` option is enabled, the template pattern is not
+used. Instead, tags are encoded using
+[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html),
+added in Graphite 1.1. The `metric_path` is a combination of the optional
+`prefix` option, measurement name, and field name.
+
+The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
+
+**Example conversion**:
+```
+cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
+=>
+cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
+cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
+```
+
+### templates
+
+For more information on templates and template patterns, see [Template patterns](/telegraf/v1.24/data_formats/template-patterns/).
diff --git a/content/telegraf/v1.24/data_formats/output/influx.md b/content/telegraf/v1.24/data_formats/output/influx.md
new file mode 100644
index 000000000..f59fb9d68
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/output/influx.md
@@ -0,0 +1,42 @@
+---
+title: InfluxDB Line Protocol output data format
+description: The `influx` data format outputs metrics into the InfluxDB Line Protocol format.
+menu:
+  telegraf_1_24_ref:
+
+    name: InfluxDB Line Protocol
+    weight: 30
+    parent: Output data formats
+---
+
+The `influx` output data format outputs metrics into [InfluxDB Line Protocol][line protocol]. InfluxData recommends this data format unless another format is required for interoperability.
+
+## Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
+  ## Maximum line length in bytes. Useful only for debugging.
+  influx_max_line_bytes = 0
+
+  ## When true, fields will be output in ascending lexical order. Enabling
+  ## this option will result in decreased performance and is only recommended
+  ## when you need predictable ordering while debugging.
+  influx_sort_fields = false
+
+  ## When true, Telegraf will output unsigned integers as unsigned values,
+  ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
+  ## integer values. Enabling this option will result in field type errors if
+  ## existing data has been written.
+ influx_uint_support = false +``` + +[line protocol]: /{{< latest "influxdb" "v1" >}}/write_protocols/line_protocol_tutorial/ diff --git a/content/telegraf/v1.24/data_formats/output/json.md b/content/telegraf/v1.24/data_formats/output/json.md new file mode 100644 index 000000000..cff00a936 --- /dev/null +++ b/content/telegraf/v1.24/data_formats/output/json.md @@ -0,0 +1,90 @@ +--- +title: JSON output data format +description: Telegraf's `json` output data format converts metrics into JSON documents. +menu: + telegraf_1_24_ref: + + name: JSON + weight: 40 + parent: Output data formats +--- + +The `json` output data format serializes Telegraf metrics into JSON documents. + +## Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" + + ## The resolution to use for the metric timestamp. Must be a duration string + ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to + ## the power of 10 less than the specified units. + json_timestamp_units = "1s" +``` + +## Examples + +### Standard format + +```json +{ + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 +} +``` + +### Batch format + +When an output plugin needs to emit multiple metrics at one time, it may use the +batch format. The use of batch format is determined by the plugin -- reference +the documentation for the specific plugin. + +```json +{ + "metrics": [ + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + }, + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + ] +} +``` diff --git a/content/telegraf/v1.24/data_formats/output/messagepack.md b/content/telegraf/v1.24/data_formats/output/messagepack.md new file mode 100644 index 000000000..2218a1811 --- /dev/null +++ b/content/telegraf/v1.24/data_formats/output/messagepack.md @@ -0,0 +1,49 @@ +--- +title: MessagePack output data format +description: Use the MessagePack output data format (serializer) to convert Telegraf metrics into MessagePack format. +menu: + telegraf_1_24_ref: + + name: MessagePack + weight: 10 + parent: Output data formats +--- + +The `msgpack` output data format (serializer) translates the Telegraf metric format to the [MessagePack](https://msgpack.org/). MessagePack is an efficient binary serialization format that lets you exchange data among multiple languages like JSON. + +### Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. 
+
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "msgpack"
+```
+
+
+### Example output
+
+Output of this format is the MessagePack binary representation of metrics, with a structure identical to the following JSON:
+
+```
+{
+   "name":"cpu",
+   "time": <TIMESTAMP>, // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
+   "tags":{
+      "tag_1":"host01",
+      ...
+   },
+   "fields":{
+      "field_1":30,
+      "field_2":true,
+      "field_3":"field_value",
+      "field_4":30.1
+      ...
+   }
+}
+```
diff --git a/content/telegraf/v1.24/data_formats/output/nowmetric.md b/content/telegraf/v1.24/data_formats/output/nowmetric.md
new file mode 100644
index 000000000..64dc4567e
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/output/nowmetric.md
@@ -0,0 +1,91 @@
+---
+title: ServiceNow Metrics output data format
+description: Use the ServiceNow Metrics output data format (serializer) to output metrics in the ServiceNow Operational Intelligence format.
+menu:
+  telegraf_1_24_ref:
+
+    name: ServiceNow Metrics
+    weight: 50
+    parent: Output data formats
+---
+
+The ServiceNow Metrics output data format (serializer) outputs metrics in the [ServiceNow Operational Intelligence format](https://docs.servicenow.com/bundle/kingston-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html).
+
+It can be used to write to a file using the File output plugin, or for sending metrics to a MID Server with Enable REST endpoint activated using the standard Telegraf HTTP output.
+If you're using the HTTP output plugin, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric.
+
+An example event looks like:
+
+```javascript
+[{
+  "metric_type": "Disk C: % Free Space",
+  "resource": "C:\\",
+  "node": "lnux100",
+  "value": 50,
+  "timestamp": 1473183012000,
+  "ci2metric_id": {
+    "node": "lnux100"
+  },
+  "source": "Telegraf"
+}]
+```
+
+## Using with the HTTP output plugin
+
+To send this data to a ServiceNow MID Server with the Web Server extension activated, you can use the HTTP output plugin.
+There are some custom headers that you need to add to manage the MID Web Server authorization.
+Here's a sample config for an HTTP output:
+
+```toml
+[[outputs.http]]
+  ## URL is the address to send metrics to
+  url = "http://<mid server fqdn or ip address>:9082/api/mid/sa/metrics"
+
+  ## Timeout for HTTP message
+  # timeout = "5s"
+
+  ## HTTP method, one of: "POST" or "PUT"
+  method = "POST"
+
+  ## HTTP Basic Auth credentials
+  username = 'evt.integration'
+  password = 'P@$$w0rd!'
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "nowmetric"
+
+  ## Additional HTTP headers
+  [outputs.http.headers]
+  # Should be set manually to "application/json" for json data_format
+  Content-Type = "application/json"
+  Accept = "application/json"
+```
+
+Starting with the London release, you also need to explicitly create an event rule to allow binding of metric events to host CIs.
+
+https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html
+
+## Using with the File output plugin
+
+You can use the File output plugin to write the payload to a file.
+In this case, just add the following section to your Telegraf configuration file:
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["C:/Telegraf/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "nowmetric"
+```
diff --git a/content/telegraf/v1.24/data_formats/output/splunkmetric.md b/content/telegraf/v1.24/data_formats/output/splunkmetric.md
new file mode 100644
index 000000000..fe3fd30f7
--- /dev/null
+++ b/content/telegraf/v1.24/data_formats/output/splunkmetric.md
@@ -0,0 +1,148 @@
+---
+title: SplunkMetric output data format
+description: The SplunkMetric serializer formats and outputs data in a format that can be consumed by a Splunk metrics index.
+menu:
+  telegraf_1_24_ref:
+
+    name: SplunkMetric
+    weight: 60
+    parent: Output data formats
+---
+
+The SplunkMetric serializer formats and outputs metric data so that it can be consumed by a Splunk metrics index.
+The serializer can be used to write to a file using the file output, or to send metrics to a Splunk HTTP Event Collector (HEC) using the standard Telegraf HTTP output.
+
+If you're using the HTTP output, this serializer batches the metrics so you don't end up with one HTTP POST per metric.
+
+The data is output in a format that conforms to the Splunk HEC JSON format, as described in
+[Send metrics in JSON format](http://dev.splunk.com/view/event-collector/SP-CAAAFDN).
+
+An example event looks like:
+
+```javascript
+{
+  "time": 1529708430,
+  "event": "metric",
+  "host": "patas-mbp",
+  "fields": {
+    "_value": 0.6,
+    "cpu": "cpu0",
+    "dc": "mobile",
+    "metric_name": "cpu.usage_user",
+    "user": "ronnocol"
+  }
+}
+```
+
+In the above snippet, the following keys are dimensions:
+* cpu
+* dc
+* user
+
+## Using with the HTTP output
+
+To send this data to a Splunk HEC, use the HTTP output with a few custom headers that manage the HEC authorization. Here's a sample configuration:
+
+```toml
+[[outputs.http]]
+  ## URL is the address to send metrics to
+  url = "https://localhost:8088/services/collector"
+
+  ## Timeout for HTTP message
+  # timeout = "5s"
+
+  ## HTTP method, one of: "POST" or "PUT"
+  # method = "POST"
+
+  ## HTTP Basic Auth credentials
+  # username = "username"
+  # password = "pa$$word"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "splunkmetric"
+  ## Provides time, index, source overrides for the HEC
+  splunkmetric_hec_routing = true
+
+  ## Additional HTTP headers
+  [outputs.http.headers]
+  # Should be set manually to "application/json" for json data_format
+  Content-Type = "application/json"
+  Authorization = "Splunk xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+  X-Splunk-Request-Channel = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+```
+
+## Overrides
+
+You can override the default values for the HEC token you are using by adding additional tags to the configuration file.
+
+The following aspects of the token can be overridden with tags:
+* index
+* source
+
+You can either use `[global_tags]` or a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md).
+
+For example, the following configuration overrides the index on just the `cpu` metric:
+
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  [inputs.cpu.tags]
+    index = "cpu_metrics"
+```
+
+## Using with the File output
+
+You can use the File output when running Telegraf on a machine with a Splunk forwarder.
+
+A sample event when `splunkmetric_hec_routing` is false (or unset) looks like:
+
+```javascript
+{
+  "_value": 0.6,
+  "cpu": "cpu0",
+  "dc": "mobile",
+  "metric_name": "cpu.usage_user",
+  "user": "ronnocol",
+  "time": 1529708430
+}
+```
+
+Data formatted in this manner can be ingested with a simple `props.conf` file that
+looks like this:
+
+```ini
+[telegraf]
+category = Metrics
+description = Telegraf Metrics
+pulldown_type = 1
+DATETIME_CONFIG =
+NO_BINARY_CHECK = true
+SHOULD_LINEMERGE = true
+disabled = false
+INDEXED_EXTRACTIONS = json
+KV_MODE = none
+TIMESTAMP_FIELDS = time
+TIME_FORMAT = %s.%3N
+```
+
+An example configuration of a file-based output is:
+
+```toml
+# Send Telegraf metrics to file(s)
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "splunkmetric"
+  splunkmetric_hec_routing = false
+```
diff --git a/content/telegraf/v1.24/get_started.md b/content/telegraf/v1.24/get_started.md
new file mode 100644
index 000000000..bf8e4dd49
--- /dev/null
+++ b/content/telegraf/v1.24/get_started.md
@@ -0,0 +1,120 @@
+---
+title: Get started
+description: Configure and start Telegraf
+menu:
+  telegraf_1_24:
+
+    name: Get started
+    weight: 25
+---
+
+After you've [downloaded and installed Telegraf](/telegraf/v1.24/install/), you're ready to begin collecting and sending data. To collect and send data, do the following:
+
+1. [Configure Telegraf](#configure-telegraf)
+2. [Start Telegraf](#start-telegraf)
+3. Use [plugins available in Telegraf](/telegraf/v1.24/plugins/) to gather, transform, and output data.
+
+## Configure Telegraf
+
+Define which plugins Telegraf will use in the configuration file. Each configuration file needs at least one enabled [input plugin](/telegraf/v1.24/plugins/inputs/) (where the metrics come from) and at least one enabled [output plugin](/telegraf/v1.24/plugins/outputs/) (where the metrics go).
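+
+For illustration, a minimal configuration that satisfies this rule might pair the `cpu` input with the `influxdb_v2` output; the URL, token, organization, and bucket values below are placeholders to replace with your own:
+
+```toml
+# Minimal sketch: one enabled input plugin and one enabled output plugin.
+[[inputs.cpu]]
+
+[[outputs.influxdb_v2]]
+  urls = ["http://localhost:8086"]
+  token = "$INFLUX_TOKEN"
+  organization = "example-org"
+  bucket = "example-bucket"
+```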
+
+The following example generates a sample configuration file with all available plugins, then uses `filter` flags to enable specific plugins.
+
+{{% note %}}
+For details on `filter` and other flags, see [Telegraf commands and flags](/telegraf/v1.24/commands/).
+{{% /note %}}
+
+1. Run the following command to create a configuration file:
+    ```bash
+    telegraf --sample-config > telegraf.conf
+    ```
+2. Locate the configuration file. The location varies depending on your system:
+    * macOS [Homebrew](http://brew.sh/): `/usr/local/etc/telegraf.conf`
+    * Linux Debian and RPM packages: `/etc/telegraf/telegraf.conf`
+    * Standalone binary: see the next section for how to create a configuration file
+
+    > **Note:** You can also specify a remote URL endpoint to pull a configuration file from. See [Configuration file locations](/telegraf/v1.24/configuration/#configuration-file-locations).
+
+3. Edit the configuration file using `vim` or a text editor. Because this example uses the [InfluxDB v2 output plugin](https://github.com/influxdata/telegraf/blob/release-1.21/plugins/outputs/influxdb_v2/README.md), you need to add the InfluxDB URL, authentication token, organization, and bucket details to this section of the configuration file.
+
+    > **Note:** For more configuration file options, see [Configuration options](/telegraf/v1.24/configuration/).
+
+4. For this example, specify two inputs (`cpu` and `mem`) with the `--input-filter` flag.
+Specify InfluxDB as the output with the `--output-filter` flag.
+
+```bash
+telegraf --sample-config --input-filter cpu:mem --output-filter influxdb_v2 > telegraf.conf
+```
+
+The resulting configuration collects CPU and memory data and sends it to InfluxDB v2.
+
+For an overview of how to configure a plugin, watch the following video:
+
+{{< youtube a0js7wiQEJ4 >}}
+
+
+## Set environment variables
+
+Add environment variables anywhere in the configuration file by prepending them with `$`.
+For strings, variables must be in quotes (for example, `"$STR_VAR"`).
+For numbers and Booleans, variables must be unquoted (for example, `$INT_VAR`, `$BOOL_VAR`).
+
+You can also set environment variables using the Linux `export` command: `export password=mypassword`
+
+> **Note:** We recommend using environment variables for sensitive information.
+
+### Example: Telegraf environment variables
+
+In the Telegraf environment variables file (`/etc/default/telegraf`):
+
+```sh
+USER="alice"
+INFLUX_URL="http://localhost:8086"
+INFLUX_SKIP_DATABASE_CREATION="true"
+INFLUX_PASSWORD="monkey123"
+```
+
+In the Telegraf configuration file (`/etc/telegraf.conf`):
+
+```toml
+[global_tags]
+  user = "${USER}"
+
+[[inputs.mem]]
+
+[[outputs.influxdb]]
+  urls = ["${INFLUX_URL}"]
+  skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION}
+  password = "${INFLUX_PASSWORD}"
+```
+
+The environment variables above add the following configuration settings to Telegraf:
+
+```toml
+[global_tags]
+  user = "alice"
+
+[[outputs.influxdb]]
+  urls = ["http://localhost:8086"]
+  skip_database_creation = true
+  password = "monkey123"
+```
+
+## Start Telegraf
+
+Next, start the Telegraf service and direct it to your configuration file:
+
+### macOS [Homebrew](http://brew.sh/)
+```bash
+telegraf --config telegraf.conf
+```
+
+### Linux (sysvinit and upstart installations)
+```bash
+sudo service telegraf start
+```
+
+### Linux (systemd installations)
+```bash
+systemctl start telegraf
+```
diff --git a/content/telegraf/v1.24/glossary.md b/content/telegraf/v1.24/glossary.md
new file mode 100644
index 000000000..a2f984da3
--- /dev/null
+++ b/content/telegraf/v1.24/glossary.md
@@ -0,0 +1,106 @@
+---
+title: Telegraf glossary
+description: This section includes definitions of important terms related to Telegraf.
+menu:
+  telegraf_1_24_ref:
+
+    name: Glossary
+    weight: 79
+---
+
+## agent
+
+An agent is the core part of Telegraf that gathers metrics from the declared input plugins and sends metrics to the declared output plugins, based on the plugins enabled by the given configuration.
+
+Related entries: [input plugin](/telegraf/v1.24/glossary/#input-plugin), [output plugin](/telegraf/v1.24/glossary/#output-plugin)
+
+## aggregator plugin
+
+Aggregator plugins receive raw metrics from input plugins and create aggregate metrics from them.
+The aggregate metrics are then passed to the configured output plugins.
+
+Related entries: [input plugin](/telegraf/v1.24/glossary/#input-plugin), [output plugin](/telegraf/v1.24/glossary/#output-plugin), [processor plugin](/telegraf/v1.24/glossary/#processor-plugin)
+
+## batch size
+
+The Telegraf agent sends metrics to output plugins in batches, not individually.
+The batch size controls the size of each write batch that Telegraf sends to the output plugins.
+
+Related entries: [output plugin](/telegraf/v1.24/glossary/#output-plugin)
+
+## collection interval
+
+The default global interval for collecting data from each input plugin.
+The collection interval can be overridden by each individual input plugin's configuration.
+
+Related entries: [input plugin](/telegraf/v1.24/glossary/#input-plugin)
+
+## collection jitter
+
+Collection jitter is used to prevent every input plugin from collecting metrics simultaneously, which can have a measurable effect on the system.
+Each collection interval, every input plugin sleeps for a random time between zero and the collection jitter before collecting the metrics.
+
+Related entries: [collection interval](/telegraf/v1.24/glossary/#collection-interval), [input plugin](/telegraf/v1.24/glossary/#input-plugin)
+
+## external plugin
+
+Programs built outside of Telegraf that run through the `execd` plugin. External plugins provide the flexibility to add functionality that doesn't exist in the internal Telegraf plugins.
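+
+For example, an external plugin is typically wired in through the `execd` input plugin; a minimal sketch, where the binary path and its flags are hypothetical:
+
+```toml
+[[inputs.execd]]
+  ## Hypothetical external plugin binary; any program that speaks the
+  ## execd shim protocol over stdout can be used here.
+  command = ["/usr/local/bin/my-external-plugin", "--poll-interval", "10s"]
+  signal = "none"
+```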
+## flush interval
+
+The global interval for flushing data from each output plugin to its destination.
+This value should not be set lower than the collection interval.
+
+Related entries: [collection interval](/telegraf/v1.24/glossary/#collection-interval), [flush jitter](/telegraf/v1.24/glossary/#flush-jitter), [output plugin](/telegraf/v1.24/glossary/#output-plugin)
+
+## flush jitter
+
+Flush jitter is used to prevent every output plugin from sending writes simultaneously, which can overwhelm some data sinks.
+Each flush interval, every output plugin sleeps for a random time between zero and the flush jitter before emitting metrics.
+This helps smooth out write spikes when running a large number of Telegraf instances.
+
+Related entries: [flush interval](/telegraf/v1.24/glossary/#flush-interval), [output plugin](/telegraf/v1.24/glossary/#output-plugin)
+
+## input plugin
+
+Input plugins actively gather metrics and deliver them to the core agent, where aggregator, processor, and output plugins can operate on the metrics.
+To activate an input plugin, enable and configure it in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](/telegraf/v1.24/glossary/#aggregator-plugin), [collection interval](/telegraf/v1.24/glossary/#collection-interval), [output plugin](/telegraf/v1.24/glossary/#output-plugin), [processor plugin](/telegraf/v1.24/glossary/#processor-plugin)
+
+## metric buffer
+
+The metric buffer caches individual metrics when writes are failing for an output plugin.
+Telegraf will attempt to flush the buffer upon a successful write to the output.
+The oldest metrics are dropped first when this buffer fills.
+
+Related entries: [output plugin](/telegraf/v1.24/glossary/#output-plugin)
+
+## output plugin
+
+Output plugins deliver metrics to their configured destination. To activate an output plugin, enable and configure it in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](/telegraf/v1.24/glossary/#aggregator-plugin), [flush interval](/telegraf/v1.24/glossary/#flush-interval), [input plugin](/telegraf/v1.24/glossary/#input-plugin), [processor plugin](/telegraf/v1.24/glossary/#processor-plugin)
+
+## precision
+
+The precision configuration setting determines how much timestamp precision is retained in the points received from input plugins. All incoming timestamps are truncated to the given precision.
+Telegraf then pads the truncated timestamps with zeros to create a nanosecond timestamp; output plugins will emit timestamps in nanoseconds.
+Valid precisions are `ns`, `us` or `µs`, `ms`, and `s`.
+
+For example, if the precision is set to `ms`, the nanosecond epoch timestamp `1480000000123456789` would be truncated to `1480000000123` in millisecond precision and then padded with zeroes to make a new, less precise nanosecond timestamp of `1480000000123000000`.
+Output plugins do not alter the timestamp further. The precision setting is ignored for service input plugins.
+
+Related entries: [aggregator plugin](/telegraf/v1.24/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.24/glossary/#input-plugin), [output plugin](/telegraf/v1.24/glossary/#output-plugin), [processor plugin](/telegraf/v1.24/glossary/#processor-plugin), [service input plugin](/telegraf/v1.24/glossary/#service-input-plugin)
+
+## processor plugin
+
+Processor plugins transform, decorate, and/or filter metrics collected by input plugins, passing the transformed metrics to the output plugins.
+
+Related entries: [aggregator plugin](/telegraf/v1.24/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.24/glossary/#input-plugin), [output plugin](/telegraf/v1.24/glossary/#output-plugin)
+
+## service input plugin
+
+Service input plugins are input plugins that run in a passive collection mode while the Telegraf agent is running.
+They listen on a socket for known protocol inputs, or apply their own logic to ingested metrics before delivering them to the Telegraf agent.
+
+Related entries: [aggregator plugin](/telegraf/v1.24/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.24/glossary/#input-plugin), [output plugin](/telegraf/v1.24/glossary/#output-plugin), [processor plugin](/telegraf/v1.24/glossary/#processor-plugin)
diff --git a/content/telegraf/v1.24/install.md b/content/telegraf/v1.24/install.md
new file mode 100644
index 000000000..e6a6a34bf
--- /dev/null
+++ b/content/telegraf/v1.24/install.md
@@ -0,0 +1,356 @@
+---
+title: Install Telegraf
+description: Install Telegraf on your operating system.
+menu:
+  telegraf_1_24:
+
+    name: Install
+    weight: 20
+aliases:
+- /telegraf/v1.24/introduction/installation/
+---
+
+This page provides directions for installing, starting, and configuring Telegraf. To install Telegraf, do the following:
+
+1. [Download Telegraf](#download)
+2. [Review requirements](#requirements)
+3. [Complete the installation](#installation)
+
+## Download
+
+Download the latest Telegraf release at the [InfluxData download page](https://portal.influxdata.com/downloads).
+
+## Requirements
+
+Installation of the Telegraf package may require `root` or administrator privileges to complete successfully.
+
+### Networking
+
+Telegraf offers multiple service [input plugins](/telegraf/v1.24/plugins/inputs/) that may
+require custom ports.
+Modify port mappings through the configuration file (`telegraf.conf`).
+
+For Linux distributions, this file is located at `/etc/telegraf` for default installations.
+
+For Windows distributions, the configuration file is located in the directory where you unzipped the Telegraf ZIP archive.
+The default location is `C:\InfluxData\telegraf`.
+
+### NTP
+
+Telegraf uses a host's local time in UTC to assign timestamps to data.
+Use the Network Time Protocol (NTP) to synchronize time between hosts. If hosts' clocks
+aren't synchronized with NTP, the timestamps on the data might be inaccurate.
+
+## Installation
+
+{{< tabs-wrapper >}}
+{{% tabs style="even-wrap" %}}
+  [Ubuntu & Debian](#)
+  [RedHat & CentOS](#)
+  [SLES & openSUSE](#)
+  [FreeBSD/PC-BSD](#)
+  [macOS](#)
+  [Windows](#)
+{{% /tabs %}}
+
+{{% tab-content %}}
+Debian and Ubuntu users can install the latest stable version of Telegraf using the `apt-get` package manager.
+
+### Ubuntu & Debian
+
+Install Telegraf from the InfluxData repository with the following commands:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[wget](#)
+[curl](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```bash
+# influxdb.key GPG Fingerprint: 05CE15085FC09D18E99EFB22684A14CF2582E0C5
+wget -q https://repos.influxdata.com/influxdb.key
+echo '23a1c8836f0afc5ed24e0486339d7cc8f6790b83886c4c96995b88a061c5bb5d influxdb.key' | sha256sum -c && cat influxdb.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdb.gpg > /dev/null
+echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
+sudo apt-get update && sudo apt-get install telegraf
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```bash
+# influxdb.key GPG Fingerprint: 05CE15085FC09D18E99EFB22684A14CF2582E0C5
+curl -s https://repos.influxdata.com/influxdb.key > influxdb.key
+echo '23a1c8836f0afc5ed24e0486339d7cc8f6790b83886c4c96995b88a061c5bb5d influxdb.key' | sha256sum -c && cat influxdb.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdb.gpg > /dev/null
+echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
+sudo apt-get update && sudo apt-get install telegraf
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+**Install from a `.deb` file**:
+
+To manually install the Debian package from a `.deb` file:
+
+1. Download the latest Telegraf `.deb` release
+   from the Telegraf section of the [downloads page](https://influxdata.com/downloads/).
+2. Run the following command (making sure to supply the correct version number for the downloaded file):
+
+   ```sh
+   sudo dpkg -i telegraf_{{< latest-patch >}}-1_amd64.deb
+   ```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.24/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+For instructions on how to manually install the RPM package from a file, see the [downloads page](https://influxdata.com/downloads/).
+
+**RedHat and CentOS:** Install the latest stable version of Telegraf using the `yum` package manager:
+
+```bash
+cat <<EOF | sudo tee /etc/yum.repos.d/influxdb.repo
+[influxdb]
+name = InfluxDB Repository - RHEL \$releasever
+baseurl = https://repos.influxdata.com/rhel/\$releasever/\$basearch/stable
+enabled = 1
+gpgcheck = 1
+gpgkey = https://repos.influxdata.com/influxdb.key
+EOF
+sudo yum install telegraf
+```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.24/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+There are RPM packages provided by openSUSE Build Service for SUSE Linux users:
+
+```bash
+# add go repository
+zypper ar -f obs://devel:languages:go/ go
+# install latest telegraf
+zypper in telegraf
+```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.24/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+Telegraf is part of the FreeBSD package system.
+It can be installed by running:
+
+```bash
+sudo pkg install telegraf
+```
+
+The configuration file is located at `/usr/local/etc/telegraf.conf` with examples in `/usr/local/etc/telegraf.conf.sample`.
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.24/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+Users of macOS 10.8 and higher can install Telegraf using the [Homebrew](http://brew.sh/) package manager.
+Once `brew` is installed, you can install Telegraf by running:
+
+```bash
+brew update
+brew install telegraf
+```
+
+To have `launchd` start Telegraf at the next login:
+```
+ln -sfv /usr/local/opt/telegraf/*.plist ~/Library/LaunchAgents
+```
+To load Telegraf now:
+```
+launchctl load ~/Library/LaunchAgents/homebrew.mxcl.telegraf.plist
+```
+
+Or, if you don't want or need `launchctl`, you can just run:
+```
+telegraf -config /usr/local/etc/telegraf.conf
+```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.24/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+
+#### Download and run Telegraf as a Windows service
+
+{{% note %}}
+Installing a Windows service requires administrative permissions.
+To run PowerShell as an administrator,
+see [Launch PowerShell as administrator](https://docs.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7#with-administrative-privileges-run-as-administrator).
+{{% /note %}}
+
+In PowerShell _as an administrator_, do the following:
+
+1. Use the following commands to download the Telegraf Windows binary
+   and extract its contents to `C:\Program Files\InfluxData\telegraf\`:
+
+   ```powershell
+   > wget https://dl.influxdata.com/telegraf/releases/telegraf-{{% latest-patch %}}_windows_amd64.zip -UseBasicParsing -OutFile telegraf-{{< latest-patch >}}_windows_amd64.zip
+   > Expand-Archive .\telegraf-{{% latest-patch %}}_windows_amd64.zip -DestinationPath 'C:\Program Files\InfluxData\telegraf\'
+   ```
+
+2. Move the `telegraf.exe` and `telegraf.conf` files from
+   `C:\Program Files\InfluxData\telegraf\telegraf-{{% latest-patch %}}`
+   up a level to `C:\Program Files\InfluxData\telegraf`:
+
+   ```powershell
+   > cd "C:\Program Files\InfluxData\telegraf"
+   > mv .\telegraf-{{% latest-patch %}}\telegraf.* .
+   ```
+
+   Or create a [Windows symbolic link (Symlink)](https://blogs.windows.com/windowsdeveloper/2016/12/02/symlinks-windows-10/)
+   to point to this directory.
+
+   > The instructions below assume that either the `telegraf.exe` and `telegraf.conf` files are stored in `C:\Program Files\InfluxData\telegraf`, or that you've created a Symlink to point to this directory.
+
+3. Install Telegraf as a service:
+
+   ```powershell
+   > .\telegraf.exe --service install --config "C:\Program Files\InfluxData\telegraf\telegraf.conf"
+   ```
+
+   Make sure to provide the absolute path of the `telegraf.conf` configuration file;
+   otherwise, the Windows service may fail to start.
+
+4. To test that the installation works, run:
+
+   ```powershell
+   > C:\"Program Files"\InfluxData\telegraf\telegraf.exe --config C:\"Program Files"\InfluxData\telegraf\telegraf.conf --test
+   ```
+
+5. To start collecting data, run:
+
+   ```powershell
+   telegraf.exe --service start
+   ```
+
+### Logging and troubleshooting
+
+When Telegraf runs as a Windows service, Telegraf logs messages to Windows event logs.
+If the Telegraf service fails to start, view error logs by selecting **Event Viewer**→**Windows Logs**→**Application**.
+
+### Windows service commands
+
+The following commands are available:
+
+| Command                            | Effect                        |
+|------------------------------------|-------------------------------|
+| `telegraf.exe --service install`   | Install Telegraf as a service |
+| `telegraf.exe --service uninstall` | Remove the Telegraf service   |
+| `telegraf.exe --service start`     | Start the Telegraf service    |
+| `telegraf.exe --service stop`      | Stop the Telegraf service     |
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
diff --git a/content/telegraf/v1.24/metrics.md b/content/telegraf/v1.24/metrics.md
new file mode 100644
index 000000000..ea64c820d
--- /dev/null
+++ b/content/telegraf/v1.24/metrics.md
@@ -0,0 +1,29 @@
+---
+title: Telegraf metrics
+description: Telegraf metrics are internal representations used to model data during processing and are based on InfluxDB's data model. Each metric component includes the measurement name, tags, fields, and timestamp.
+menu:
+  telegraf_1_24:
+    name: Metrics
+    weight: 10
+    parent: Concepts
+draft: true
+---
+
+Telegraf metrics are the internal representation used to model data during
+processing. These metrics are closely based on InfluxDB's data model and contain
+four main components:
+
+- **Measurement name**: Description and namespace for the metric.
+- **Tags**: Key/value string pairs, usually used to identify the
+  metric.
+- **Fields**: Typed key/value pairs that usually contain the
+  metric data.
+- **Timestamp**: Date and time associated with the fields.
+
+This metric type exists only in memory and must be converted to a concrete
+representation to be transmitted or viewed. Telegraf provides [output data formats][output data formats] (also known as *serializers*) for these conversions. Telegraf's default serializer converts to [InfluxDB Line
+Protocol][line protocol], which provides a high-performance, one-to-one
+mapping from Telegraf metrics.
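+
+For example, a metric with the measurement name `cpu`, one tag, one field, and a
+nanosecond timestamp maps to a single line protocol entry; the values here are
+illustrative:
+
+```text
+cpu,host=server01 usage_idle=90.5 1458229140000000000
+```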
+
+[output data formats]: /telegraf/v1.24/data_formats/output/
+[line protocol]: /telegraf/v1.24/data_formats/output/influx/
diff --git a/content/telegraf/v1.24/plugins.md b/content/telegraf/v1.24/plugins.md
new file mode 100644
index 000000000..e62ef4bc0
--- /dev/null
+++ b/content/telegraf/v1.24/plugins.md
@@ -0,0 +1,82 @@
+---
+title: Plugin directory
+description: >
+  Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+  It supports four categories of plugins including input, output, aggregator, and processor.
+  View and search all available Telegraf plugins.
+menu:
+  telegraf_1_24_ref:
+
+    weight: 10
+weight: 6
+aliases:
+
+  - /telegraf/v1.23/plugins/plugins-list/
+  - /telegraf/v1.23/plugins/aggregators/
+  - /telegraf/v1.23/plugins/inputs/
+  - /telegraf/v1.23/plugins/outputs/
+  - /telegraf/v1.23/plugins/processors/
+  - /telegraf/v1.22/plugins/plugins-list/
+  - /telegraf/v1.22/plugins/aggregators/
+  - /telegraf/v1.22/plugins/inputs/
+  - /telegraf/v1.22/plugins/outputs/
+  - /telegraf/v1.22/plugins/processors/
+  - /telegraf/v1.21/plugins/plugins-list/
+  - /telegraf/v1.21/plugins/aggregators/
+  - /telegraf/v1.21/plugins/inputs/
+  - /telegraf/v1.21/plugins/outputs/
+  - /telegraf/v1.21/plugins/processors/
+  - /telegraf/v1.20/plugins/plugins-list/
+  - /telegraf/v1.20/plugins/aggregators/
+  - /telegraf/v1.20/plugins/inputs/
+  - /telegraf/v1.20/plugins/outputs/
+  - /telegraf/v1.20/plugins/processors/
+  - /telegraf/v1.19/plugins/plugins-list/
+  - /telegraf/v1.19/plugins/aggregators/
+  - /telegraf/v1.19/plugins/inputs/
+  - /telegraf/v1.19/plugins/outputs/
+  - /telegraf/v1.19/plugins/processors/
+  - /telegraf/v1.18/plugins/plugins-list/
+  - /telegraf/v1.18/plugins/aggregators/
+  - /telegraf/v1.18/plugins/inputs/
+  - /telegraf/v1.18/plugins/outputs/
+  - /telegraf/v1.18/plugins/processors/
+  - /telegraf/v1.17/plugins/plugins-list/
+  - /telegraf/v1.17/plugins/aggregators/
+  - /telegraf/v1.17/plugins/inputs/
+  - /telegraf/v1.17/plugins/outputs/
+  - /telegraf/v1.17/plugins/processors/
+---
+
+Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+It supports four categories of plugins (input, output, aggregator, and processor), plus external plugins.
+
+{{< list-filters >}}
+
+**Jump to:**
+
+- [Input plugins](#input-plugins)
+- [Output plugins](#output-plugins)
+- [Aggregator plugins](#aggregator-plugins)
+- [Processor plugins](#processor-plugins)
+
+## Input plugins
+Telegraf input plugins are used with the InfluxData time series platform to collect
+metrics from the system, services, or third-party APIs.
+
+{{< telegraf/plugins type="input" >}}
+
+## Output plugins
+Telegraf output plugins write metrics to various destinations.
+
+{{< telegraf/plugins type="output" >}}
+
+## Aggregator plugins
+Telegraf aggregator plugins create aggregate metrics (for example, mean, min, max, and quantiles).
+
+{{< telegraf/plugins type="aggregator" >}}
+
+## Processor plugins
+Telegraf processor plugins transform, decorate, and filter metrics.
+
+{{< telegraf/plugins type="processor" >}}
diff --git a/content/telegraf/v1.24/release-notes-changelog.md b/content/telegraf/v1.24/release-notes-changelog.md
new file mode 100644
index 000000000..b156287b3
--- /dev/null
+++ b/content/telegraf/v1.24/release-notes-changelog.md
@@ -0,0 +1,4198 @@
+---
+title: Telegraf 1.24 release notes
+description: Important features and changes in the latest version of Telegraf.
+aliases:
+  - /telegraf/v1.23/reference/release-notes/influxdb/
+  - /telegraf/v1.23/about_the_project/release-notes-changelog/
+menu:
+  telegraf_1_24_ref:
+
+    name: Release notes
+    weight: 60
+---
+## v1.24.0 [2022-09-12]
+
+### Breaking change
+
+- Set default minimum TLS version to v1.2 for security reasons on both server and client connections.
+This is a change from the previous default (TLS v1.0) on the server configuration and might break clients relying on older TLS versions.
+You can manually revert to older versions on a per-plugin basis using the `tls_min_version` option in the plugins that require it.
+
+### Features
+
+- Create a custom builder that scans a Telegraf configuration file for the plugins being used and builds a new binary including only those plugins.
+- Add license checking tool.
+- Add metrics for member and replica-set average health of MongoDB.
+- Allow collecting node-level metrics for Couchbase buckets.
+- Make `config` a subcommand.
+
+### Bug fixes
+
+- Add version number to macOS packages.
+- Backport sync `sample.conf` and `README.md` files.
+- Fix parsing errors in Datadog mode.
+- Clean up after Redis merge.
+- Refactor Telegraf version.
+- Remove shell execution for `license-checker`.
+
+### New plugins
+
+#### Inputs
+- [AWS CloudWatch Metric Streams](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch_metric_streams) (`cloudwatch_metric_streams`) - Contributed by [@mccabecillian](https://github.com/mccabecillian).
+- [Linux CPU](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/linux_cpu) (`linux_cpu`) - Contributed by [@fabianishere](http://github.com/fabianishere).
+- [NSDP](https://github.com/hdecarne-github/nsdp-telegraf-plugin) (`nsdp`) - Contributed by [@hdecarne](https://github.com/hdecarne).
+- [Supervisor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/supervisor) (`supervisor`) - Contributed by [@niasar](http://github.com/niasar).
+- [UPSD](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/upsd) (`upsd`) - Contributed by [@Malinskiy](http://github.com/Malinskiy).
+
+#### Outputs
+- [PostgreSQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/postgresql) (`postgresql`) - Contributed by [@phemmer](https://github.com/phemmer).
+- [RedisTimeSeries](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/redistimeseries) (`redistimeseries`) - Contributed by [@gkorland](http://github.com/gkorland).
+- [Stomp (Active MQ)](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/stomp) (`stomp`) - Contributed by [@amus-sal](http://github.com/amus-sal).
+
+#### Serializers
+- [CSV](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/csv) (`csv`) - Contributed by [@influxdata](http://github.com/influxdata).
+
+### Input plugin updates
+
+- Nats Consumer (`nats_consumer`): Add simple support for JetStream subjects.
+- Cisco Telemetry MDT (`cisco_telemetry_mdt`): Add gRPC keepalive/timeout configuration options.
+- Directory Monitor (`directory_monitor`):
+  - Support paths for `files_to_ignore` and `files_to_monitor`.
+  - Traverse subdirectories.
+- Kafka Consumer (`kafka_consumer`): Add option to set default fetch message bytes.
+- Linux CPU (`linux_cpu`): Add plugin to collect CPU metrics on Linux.
+- Logstash (`logstash`): Record number of failures.
+- Modbus (`modbus`): Error out on requests with no fields defined.
+- MQTT Consumer (`mqtt_consumer`): Add incoming MQTT message size calculation.
+- Nginx Plus API (`nginx_plus_api`): Gather `limit_reqs` metrics.
+- NTPQ (`ntpq`):
+  - Add option to specify command flags.
+  - Add the ability to query remote servers.
+  - Allow specifying the `reach` output format.
+- Openstack (`openstack`): Add `allow_reauth` configuration option.
+- Smart (`smart`): Collect SSD endurance information where available in `smartctl`.
+- SQL Server (`sqlserver`):
+  - Add database name to IO stats for MI.
+  - Improve filtering for active requests.
+  - Fix filtering for `sqlAzureMIRequests` and `sqlAzureDBRequests`.
+- StatsD (`statsd`): Add median timing calculation.
+- Syslog (`syslog`): Log remote host as source tag.
+- x509 Cert (`x509_cert`):
+  - Add SMTP protocol.
+  - Add proxy support.
+  - Support multiple sources with non-overlapping DNS entries.
+- RabbitMQ (`rabbitmq`): Add support for `head_message_timestamp` metric.
+- Redis (`redis`): Add Redis 6 ACL authorization support.
+- Jolokia 2 (`jolokia2`): Add optional origin header.
+- MongoDB (`mongodb`): Add an option to bypass connection errors on start.
+- OPCUA (`opcua`): Assign node ID correctly.
+- Prometheus (`prometheus`): Fix error when running outside a Kubernetes cluster.
+- UPSD (`upsd`): Move to new `sample.conf` style.
+
+### Output plugin updates
+
+- Cloudwatch (`cloudwatch`): Add proxy support.
+- MQTT (`mqtt`): Add support for MQTT protocol version 5.
+- AMQP (`amqp`): Add proxy support.
+- Graphite (`graphite`): Retry connecting to servers with failed send attempts.
+- Groundwork (`groundwork`):
+  - Improve metric parsing to extend output.
+  - Add default appType as a configuration option.
+- Redis Time Series (`redistimeseries`): Add integration test.
+- SQL (`sql`): Add connection settings for Go `sql.DB`.
+- ExecD (`execd`): Fix error when partially unserializable metrics are written.
+- Wavefront (`wavefront`): Update Wavefront SDK and use non-deprecated APIs.
+
+### Serializer updates
+- JSON (`json`): Add new `json_transformation` option to transform the JSON output. This new option can be used to transform the JSON output using the JSONata language to accommodate requirements on the receiver side. The setting can also filter and process JSON data points.
+- Prometheus (`prometheus`):
+  - Provide option to reduce payload size by removing HELP from the payload.
+  - Sort labels in the prometheusremotewrite serializer.
+
+### Parser updates
+- Migrate parsers to new style.
+- XPath (`xpath`): Add support for returning underlying data types.
+- CSV (`csv`): Add `reset-mode` flag.
+
+### Processor updates
+- Starlark (`starlark`): Add benchmark for tag concatenation.
+
+### Dependency updates
+
+- Update `github.com/jackc/pgx/v4` from 4.16.1 to 4.17.0.
+- Update `github.com/Azure/go-autorest/autorest` from 0.11.24 to 0.11.28.
+- Update `github.com/aws/aws-sdk-go-v2/service/ec2` from 1.51.2 to 1.52.1.
+- Update `github.com/urfave/cli/v2` from 2.3.0 to 2.11.2.
+- Update `github.com/aws/aws-sdk-go-v2/service/timestreamwrite` from 1.13.6 to 1.13.12.
+- Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.61.1695 to 1.61.1727.
+- Update `go.mongodb.org/mongo-driver` from 1.9.1 to 1.10.1.
+- Update `github.com/wavefronthq/wavefront-sdk-go` from 0.10.1 to 0.10.2.
+- Update `github.com/aws/aws-sdk-go-v2/service/sts` from 1.16.7 to 1.16.13.
+- Update `github.com/aerospike/aerospike-client-go/v5` from 5.7.0 to 5.9.0.
+- Update `github.com/hashicorp/consul/api` from 1.13.1 to 1.14.0.
+- Update `github.com/tidwall/gjson` from 1.14.1 to 1.14.3.
+- Update `github.com/rabbitmq/amqp091-go` from 1.3.4 to 1.4.0.
+- Update `github.com/aws/aws-sdk-go-v2/service/dynamodb` from 1.15.10 to 1.16.1.
+- Update `github.com/gophercloud/gophercloud` from 0.25.0 to 1.0.0.
+- Update `k8s.io/client-go` from 0.24.3 to 0.25.0.
+- Update `github.com/aws/aws-sdk-go-v2/feature/ec2/imds` from 1.12.11 to 1.12.13.
+- Update `github.com/urfave/cli/v2` from 2.11.2 to 2.14.1.
+- Update `gonum.org/v1/gonum` from 0.11.0 to 0.12.0.
+- Update `github.com/Azure/azure-kusto-go` from 0.7.0 to 0.8.0.
+- Update `google.golang.org/grpc` from 1.48.0 to 1.49.0.
+
+## v1.23.4 [2022-08-16]
+
+- Bump `github.com/lxc/lxd` to be able to run tests.
+- Sync sql output and input build constraints to handle loong64 in go1.19.
+- Update credentials file to not use `endpoint_url` parameter.
+- Fix linter issues.
+- Add Coralogix dialect to OpenTelemetry.
+
+## Input plugin updates
+
+- Cloudwatch (`cloudwatch`): Customizable batch size when querying.
+- Kube Inventory (`kube_inventory`): Send file location to enable token auto-refresh.
+- Kubernetes (`kubernetes`): Refresh token from file at each read.
+- MongoDB (`mongodb`): Update version check for newer versions.
+- OPC UA (`opcua`): Return an error with mismatched types.
+- SQL Server (`sqlserver`): Set lower deadlock priority.
+- Stackdriver (`stackdriver`): Handle when no buckets available.
+
+## Dependency updates
+
+- Bump github.com/testcontainers/testcontainers-go from 0.12.0 to 0.13.0.
+- Bump github.com/apache/thrift from 0.15.0 to 0.16.0.
+- Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.46.0 to 1.51.0.
+- Update all go.opentelemetry.io dependencies.
+- Bump github.com/go-ldap/ldap/v3 from 3.4.1 to 3.4.4.
+- Bump github.com/karrick/godirwalk from 1.16.1 to 1.17.0.
+- Bump github.com/vmware/govmomi from 0.28.0 to 0.29.0.
+- Bump github.com/eclipse/paho.mqtt.golang from 1.3.5 to 1.4.1.
+- Bump github.com/shirou/gopsutil/v3 from 3.22.4 to 3.22.7.
+- Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs.
+- Bump github.com/Azure/go-autorest/autorest/adal.
+- Bump github.com/pion/dtls/v2 from 2.0.13 to 2.1.5.
+- Bump github.com/Azure/azure-event-hubs-go/v3.
+- Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch.
+- Bump github.com/aws/aws-sdk-go-v2/service/kinesis.
+- Bump github.com/aws/aws-sdk-go-v2/service/dynamodb.
+- Bump github.com/signalfx/golib/v3 from 3.3.43 to 3.3.45.
+- Update github.com/BurntSushi/toml from 0.4.1 to 1.2.0.
+- Update cloud.google.com/go/pubsub from 1.23.0 to 1.24.0.
+- Update k8s.io/apimachinery from 0.24.2 to 0.24.3.
+- Update github.com/Shopify/sarama from 1.34.1 to 1.35.0.
+- Bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0.
+- Bump github.com/emicklei/go-restful from v2.9.5+incompatible to v3.8.0.
+- Bump github.com/hashicorp/consul/api from 1.12.0 to 1.13.1.
+- Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0.
+- Bump google.golang.org/api from 0.85.0 to 0.91.0.
+- Bump github.com/antchfx/xmlquery from 1.3.9 to 1.3.12.
+- Bump github.com/aws/aws-sdk-go-v2/service/ec2.
+- Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds.
+- Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs.
+
+## v1.23.3 [2022-07-25]
+
+## Bug fixes
+- Openstack input plugin (`inputs.openstack`): Use v3 volume library.
+- MQTT Consumer input plugin (`inputs.mqtt_consumer`): Fix topic parsing error when the topic has a '/' prefix.
+- SNMP Trap input plugin (`inputs.snmp_trap`): Prevent map panic when using with `netsnmp` translator.
+- SQL Server input plugin (`inputs.sqlserver`): Set lower deadlock priority on queries.
+- `common.cookie`: Use reader over readcloser, regenerate `cookie-jar` at reauthorization.
+- Prometheus parser (`parsers.prometheus`): Histogram infinity bucket is now always present.
+
+## Dependency updates
+- Bump `github.com/antchfx/jsonquery` from 1.1.5 to 1.2.0.
+
+## v1.23.2 [2022-07-11]
+
+## Bug fixes
+
+- Remove unexpected deprecation warnings for non-deprecated packages that occurred in 1.23.1.
+- HTTP input plugin (`inputs.http`): Allow both 200 and 201 response codes when generating cookie authentication. Also update the cookie header docs to show a TOML map rather than a string.
+- Microsoft SQL Server input plugin (`inputs.sqlserver`): Use `bigint` for `backupsize` in `sqlserver` queries.
+- gNMI input plugin (`inputs.gnmi`): Refactor `tag_only` subscriptions for complex keys (such as `network-instances`) and to improve concurrency. The subscription key is no longer hardcoded to the device name and the `name` tag. Add the ability to specify a subscription key on a per-tag basis.
+- SNMP input plugin (`inputs.snmp`): Set gosnmp's `UseUnconnectedUDPSocket` to true when using UDP. This adds support for accepting SNMP responses from any address (not just the requested address), which is useful when gathering responses from redundant/failover systems.
+
+## Dependency updates
+- Bump `github.com/docker/docker` from 20.10.14 to 20.10.17.
+
+## v1.23.1 [2022-07-05]
+
+## Bug fixes
+- Jolokia2 input plugin (`jolokia2`): Resolve panic on null response.
+- RabbitMQ input plugin (`rabbitmq`): Don't require listeners to be present in overview.
+- Sync back `sample.confs` for Couchbase input plugin (`couchbase`) and Groundwork output plugin (`groundwork`).
+- Filter out views in MongoDB lookup.
+- Fix race condition in configuration and prevent concurrent map writes to `c.UnusedFields`.
+- Restore sample configurations broken during initial migration.
+
+## Dependency updates
+- Bump `cloud.google.com/go/monitoring` from 1.2.0 to 1.5.0.
+- Bump `github.com/aws/aws-sdk-go-v2/credentials` from 1.12.2 to 1.12.5.
+- Bump `google.golang.org/grpc` from 1.46.2 to 1.47.0.
+- Bump `k8s.io/client-go` from 0.23.3 to 0.24.1.
+- Bump `github.com/go-logfmt/logfmt` from 0.5.0 to 0.5.1.
+- Bump `github.com/aws/aws-sdk-go-v2/service/dynamodb` from 1.15.3 to 1.15.7.
+- Bump `go.mongodb.org/mongo-driver` from 1.9.0 to 1.9.1.
+- Bump `github.com/gophercloud/gophercloud` from 0.24.0 to 0.25.0.
+- Bump `google.golang.org/api` from 0.74.0 to 0.84.0.
+- Bump `github.com/fatih/color` from 1.10.0 to 1.13.0.
+- Bump `github.com/aws/aws-sdk-go-v2/service/timestreamwrite` from 1.3.2 to 1.13.6.
+- Bump `github.com/shopify/sarama` from 1.32.0 to 1.34.1.
+- Bump `github.com/dynatrace-oss/dynatrace-metric-utils-go` from 0.3.0 to 0.5.0.
+- Bump `github.com/nats-io/nats.go` from 1.15.0 to 1.16.0.
+- Bump `cloud.google.com/go/pubsub` from 1.18.0 to 1.22.2.
+- Bump `go.opentelemetry.io/collector/pdata` from 0.52.0 to 0.54.0.
+- Bump `github.com/jackc/pgx/v4` from 4.15.0 to 4.16.1.
+- Bump `cloud.google.com/go/bigquery` from 1.8.0 to 1.33.0.
+- Bump `github.com/Azure/azure-kusto-go` from 0.6.0 to 0.7.0.
+- Bump `cloud.google.com/go/pubsub` from 1.22.2 to 1.23.0.
+- Bump `github.com/aws/aws-sdk-go-v2/service/kinesis` from 1.13.0 to 1.15.7.
+- Bump `github.com/aws/aws-sdk-go-v2/service/ec2` from 1.1.0 to 1.46.0.
+- Bump `github.com/golang-jwt/jwt/v4` from 4.4.1 to 4.4.2.
+- Bump `github.com/vmware/govmomi` from 0.27.3 to 0.28.0.
+- Bump `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs` from 1.15.4 to 1.15.8.
+- Bump `github.com/influxdata/influxdb-observability/otel2influx` from 0.2.21 to 0.2.22.
+- Bump `k8s.io/api` from 0.24.1 to 0.24.2.
+- Bump `github.com/prometheus/client_golang` from 1.12.1 to 1.12.2.
+
+## v1.23.0 [2022-06-13]
+
+- Sample configuration (`sample.conf`) files for the different plugins are now embedded into the Go code by the compiler. You can now download the sample configuration from
+Telegraf without having to paste in sample configurations from each plugin's README.md.
+- Add missing build constraints for sqlite.
+- Always build README-embedder for host-architecture.
+- Avoid calling `sadc` with an invalid 0 interval.
+- Check `net.Listen()` error in tests.
+- Add DataDog count metrics.
+- Deprecate unused database configuration option.
+- Document interval setting for internet speed plugin.
+- Add Elasticsearch output float handling test.
+- Log instance name in skip warnings.
+- Output erroneous namespace and fix error.
+- Remove any content type from Prometheus accept header.
+- Remove full access permissions.
+- Search services file in `/etc/services` and fall back to `/usr/etc/services`.
+- Migrate XPath parser to new style.
+- Add field key option to set event partition key.
+- Add semantic commits checker.
+- Allow other `fluentd` metrics.
+- Add Artifactory Webhook Receiver.
+- Create and push nightly Docker images to quay.io.
+- Fix error if no nodes found for current configuration with XPath parser.
+
+### New plugins
+
+- [Fritzbox](https://github.com/hdecarne-github/fritzbox-telegraf-plugin/blob/main/README.md) (`fritzbox`) - Contributed by [@hdecarne](https://github.com/hdecarne).
+- [Huebridge](https://github.com/hdecarne-github/huebridge-telegraf-plugin/blob/main/README.md) (`huebridge`) - Contributed by [@hdecarne](https://github.com/hdecarne).
+- [Slab](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/slab/README.md) (`slab`) - Contributed by @bobuhiro11.
+
+### Input plugin updates
+- Burrow (`burrow`): Move Dialer to variable and run `make fmt`.
+- CPU (`cpu`): Add core and physical ID tags that contain information about the physical CPU or cores in cases of hyper-threading.
+- HTTP (`http`): Use readers over closers.
+- Lustre (`lustre`): Support collecting per-client stats.
+- Mock (`mock`): Add constant algorithm.
+- Tail (`tail`): Add ANSI color filter.
+- Redis (`redis`): Fix `goroutine` leak triggered by the auto-reload configuration mechanism.
+
+### Output plugin updates
+- HTTP (`http`): Enable authentication against a Google API protected by the OAuth 2.0 protocol.
+- Elasticsearch (`elasticsearch`): Add healthcheck timeout.
+- SQL (`sql`): Add table existence cache.
+
+### Dependency updates
+- Update `github.com/wavefronthq/wavefront-sdk-go` from 0.9.10 to 0.9.11.
+- Update `github.com/aws/aws-sdk-go-v2/config` from 1.15.3 to 1.15.7.
+- Update `github.com/sensu/sensu-go/api/core/v2` from 2.13.0 to 2.14.0.
+- Update `go.opentelemetry.io/otel/metric` from 0.28.0 to 0.30.0.
+- Update `github.com/nats-io/nats-server/v2` from 2.7.4 to 2.8.4.
+- Update `golangci-lint` from v1.45.2 to v1.46.2.
+- Update `gopsutil` from v3.22.3 to v3.22.4 to allow for HOST_PROC_MOUNTINFO.
+- Update `moby/ipvs` dependency from v1.0.1 to v1.0.2.
+- Update `modernc.org/sqlite` from v1.10.8 to v1.17.3.
+- Update `github.com/containerd/containerd` from v1.5.11 to v1.5.13.
+- Update `github.com/tidwall/gjson` from 1.10.2 to 1.14.1.
+
+## v1.22.4 [2022-05-17]
+
+- Wait for network up in `systemd` packaging.
+
+### Input plugin updates
+- Couchbase (`couchbase`): Do not assume metrics will all be of the same length.
+- StatsD (`statsd`): Fix error when closing network connection.
+- Disk (`disk`): Add mount option filtering.
+
+### Output plugin updates
+- Azure Monitor (`azure_monitor`): Reinitialize `http` client on context deadline error.
+- Wavefront (`wavefront`): Do not add `telegraf.host` tag if no `host` tag is provided.
+
+### Dependency updates
+- Update `github.com/showwin/speedtest-go` from 1.1.4 to 1.1.5.
+- Update OpenTelemetry plugins to v0.51.0.
+
+## v1.22.3 [2022-04-28]
+
+- Update Go to 1.18.1.
+
+### Input plugin updates
+- InfluxDB Listener (`influxdb_listener`): Remove duplicate writes with upstream parser.
+- GNMI (`gnmi`): Use external xpath parser.
+- System (`system`): Reduce log level back to original level.
+
+## v1.22.2 [2022-04-25]
+
+- Allow Makefile to work on Windows.
+- Allow zero outputs when using the `test-wait` parameter.
+
+### Input plugin updates
+- Aerospike (`aerospike`): Fix statistics query bug.
+- Aliyun CMS (`aliyuncms`): Ensure metrics accept array.
+- Cisco Telemetry MDT (`cisco_telemetry_mdt`):
+  - Align the default value for message size.
+  - Remove overly verbose info message.
+- GNMI (`gnmi`):
+  - Add mutex to lookup map.
+  - Use sprint to cast to strings.
+- Consul agent (`consul_agent`): Use correct auth token.
+- MySQL (`mysql`): Add `mariadb_dialect` to address the MariaDB differences in `INNODB_METRICS`.
+- SMART (`smart`): Correctly parse various numeric forms.
+- Prometheus (`prometheus`): Move from watcher to informer.
+
+### Output plugin updates
+- InfluxDB v2 (`influxdb_v2`): Improve error message.
+
+### Dependency updates
+- Update `github.com/Azure/azure-kusto-go` from 0.5.0 to 0.6.0.
+- Update `opentelemetry` from v0.2.10 to v0.2.17.
+- Update `go.opentelemetry.io/collector/pdata` from v0.48.0 to v0.49.0.
+- Update `github.com/aws/aws-sdk-go-v2/config` from 1.13.1 to 1.15.3.
+- Update `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`.
+- Update `github.com/aws/aws-sdk-go-v2/credentials` from 1.8.0 to 1.11.2.
+- Update `github.com/containerd/containerd` from v1.5.9 to v1.5.11.
+- Update `github.com/miekg/dns` from 1.1.46 to 1.1.48.
+- Update `github.com/gopcua/opcua` from v0.3.1 to v0.3.3.
+- Update `github.com/aws/aws-sdk-go-v2/service/dynamodb`.
+- Update `github.com/xdg/scram` from 1.0.3 to 1.0.5.
+- Update `go.mongodb.org/mongo-driver` from 1.8.3 to 1.9.0.
+- Update `starlark 7a1108eaa012->d1966c6b9fcd`.
+
+## v1.22.1 [2022-04-06]
+
+- Update `gonum.org/v1/gonum` from 0.9.3 to 0.11.0.
+- Update `github.com/golang-jwt/jwt/v4` from 4.2.0 to 4.4.1.
+- Update `gopsutil` and associated dependencies for improved OpenBSD support.
+- Fix default value for logfile rotation interval.
+
+### Input plugin updates
+- Intel PMU (`intel_pmu`): Fix slow running intel-pmu test.
+- Cloud PubSub (`cloud_pubsub`): Skip longer integration tests on `-short` mode.
+- Cloud PubSub Push (`cloud_pubsub_push`): Reduce timeouts and sleeps.
+- SQL Server (`sqlserver`): Fix inconsistencies in `sql*Requests` queries.
+- ZFS (`zfs`): Fix redundant pool tag.
+- vSphere (`vsphere`): Update debug message information.
+
+### Output plugin updates
+- Azure Monitor (`azure_monitor`): Include body in error message.
+- HTTP (`http`): Switch HTTP 100 test case values.
+
+### Processor plugin updates
+- TopK (`topk`): Clarify the `k` and `fields` parameters.
+
+### New external plugins
+- [PSI External Plugin](https://github.com/gridscale/linux-psi-telegraf-plugin/blob/main/README.md) (`external.psi`) - Contributed by [@ajfriesen](https://github.com/ajfriesen).
+
+## v1.22.0 [2022-03-22]
+
+### Features
+
+- Add `autorestart` and `restartdelay` flags to the Windows service.
+- Add builds for `riscv64`.
+- Add file version and icon to `win.exe`.
+- Add `systemd` notify support.
+- Check TLS configuration early to catch missing certificates.
+- Implement collection offset.
+- `common.auth`: HTTP basic auth.
+- `common.cookie`: Support headers with cookie auth.
+- `common.proxy`: Add `socks5` proxy support.
+- Improve error logging on plugin initialization.
+
+### Bug fixes
+
+- Print loaded plugins and deprecations for once and test.
+- Remove signed macOS artifacts.
+- Run `go mod tidy`.
+- Fix `prometheusremotewrite` wrong timestamp unit.
+- Fix sudden close caused by OPC UA input.
+- Update `containerd` to 1.5.9.
+- Update `go-sensu` to v2.12.0.
+- Update `gosmi` from v0.4.3 to v0.4.4.
+- Update parsing logic of `config.duration`.
+- Update precision parameter default value.
+- Use `sha256` for RPM digest.
+- Add warning output when running with `--test`.
+- Graceful shutdown of Telegraf with Windows service.
+- Add push-only updated values flag to histogram aggregator.
+- `common.cookie`: Address flaky tests in cookie_test.go and graylog_test.go.
+- `common.shim`: Linter fixes.
+- Do not save cache on i386 builds.
+- Add error message for missing environment variables in configuration file.
+- Fix panic in parsers due to missing log for all plugins using `SetParserFunc`.
+- Grab table columns more accurately.
+- Improve parser tests by using `go-cmp/cmp`.
+- Linter fixes for `config/config.go`.
+- Log error when loading MIBs.
+- Fix Mac signing issue with arm64.
+
+### New plugins
+
+#### Inputs
+
+- [HashiCorp Consul Agent Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul_agent) (`consul_agent`) - Contributed by [@efbar](https://github.com/efbar).
+- [HashiCorp Nomad Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nomad) (`nomad`) - Contributed by [@efbar](https://github.com/efbar).
+- [HashiCorp Vault Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/vault) (`vault`) - Contributed by [@efbar](https://github.com/efbar).
+- [Hugepages Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hugepages) (`hugepages`) - Contributed by [@zak-pawel](https://github.com/zak-pawel).
+- [Mock Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mock) (`mock`) - Contributed by [InfluxData](https://github.com/influxdata).
+- [Redis Sentinel Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis_sentinel) (`redis_sentinel`) - Contributed by [@spideyfusion](https://github.com/spideyfusion).
+- [Socketstat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socketstat) (`socketstat`) - Contributed by [@sajoupa](https://github.com/sajoupa).
+- [XtremIO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/xtremio) (`xtremio`) - Contributed by [@cthiel42](https://github.com/cthiel42).
+
+#### Processors
+
+- [Noise Processor](https://github.com/influxdata/telegraf/tree/master/plugins/processors/noise) (`noise`) - Contributed by [@wizarq](https://github.com/wizarq).
+
+### Input plugin updates
+
+- Aerospike (`aerospike`): Update `github.com/aerospike/aerospike-client-go` from 1.27.0 to 5.7.0.
+- Bond (`bond`): Add additional stats.
+- Directory Monitor (`directory_monitor`):
+  - Update `djherbis/times` and fix `dependabot`.
+  - Restructure the plugin.
+- Disk (`disk`): Fix missing storage in container.
+- Docker (`docker`):
+  - Keep field type of `tasks_desired` the same.
+  - Update memory usage calculation.
+  - Update client API version.
+- ECS (`ecs`): Use current time as timestamp.
+- Execd (`execd`): Add newline for Prometheus parsing.
+- File (`file`): Stateful parser handling.
+- GNMI (`gnmi`): Add dynamic tagging.
+- Graylog (`graylog`):
+  - Add `toml` tags.
+  - Add `timeout-setting`.
+  - Update documentation to use current URLs.
+- HTTP (`http`): Ensure http body is empty.
+- HTTP Listener v2 (`http_listener_v2`): Revert deprecation.
+- Internet Speed (`internet_speed`): Add caching.
+- IPset (`ipset`): Fix crash when command not found.
+- JSON V2 (`json_v2`):
+  - Allow multiple optional objects.
+  - Use raw values for timestamps.
+- Kibana (`kibana`): Add `heap_size_limit` field.
+- Logparser (`logparser`):
+  - Add comment.
+  - Fix panic due to missing log.
+- MDStat (`mdstat`): Fix when sync is less than 10%.
+- Memcached (`memcached`): Gather additional stats.
+- Modbus (`modbus`):
+  - Make Telegraf compile on Windows with Go 1.16.2.
+  - Re-enable OpenBSD support.
+  - Update documentation.
+  - Add `per-request` tags.
+  - Support multiple slaves (gateway feature).
+- MQTT Consumer (`mqtt_consumer`): Topic extraction no longer requires all three fields.
+- NFS Client (`nfsclient`): Add new field.
+- NTPQ (`ntpq`): Correctly read long poll output.
+- OPC UA (`opcua`):
+  - Accept non-standard OK status by implementing a configurable workaround.
+  - Add more data to error log.
+  - Remove duplicate addition of fields.
+- OpenLDAP (`openldap`): Update `go-ldap` to v3.4.1.
+- OpenStack (`openstack`): Fix typo.
+- OpenWeatherMap (`openweathermap`): Add `feels_like` field.
+- PHP-FPM (`phpfpm`): Ensure CI tests run against i386.
+- PostgreSQL (`postgresql`): Add option to disable prepared statements.
+- SMART (`smart`): Add concurrency configuration option, `nvme-cli` v1.14+ support, and lint fixes.
+- SNMP (`snmp`):
+  - Respect number of retries configured.
+  - Use the correct path when evaluating symlink.
+  - Add option to select translator.
+  - Check index before assignment.
+  - Do not require networking during tests.
+  - Ensure folders do not get loaded more than once.
+  - Fix panic due to no module.
+  - Fix errors if MIBs folder doesn't exist.
+  - Optimize locking for MIBs loading.
+- SNMP Trap (`snmp_trap`):
+  - Collapse fields by calling a more in-depth function.
+  - Deprecate unused timeout configuration option.
+- SQL (`sql`): Add ClickHouse driver.
+- StatsD (`statsd`): Sanitize names.
+- Syslog (`syslog`): Add RFC 3164 to RFC 5424 translation to the documentation.
+- System (`system`): Remove verbose logging.
+- Windows Performance Counters (`win_perf_counters`):
+  - Allow errors to be ignored.
+  - Implement support for reading raw values, add tests, and update documentation.
+- X.509 Certificate (`x509_cert`):
+  - Mark `TestGatherUDPCert` as an integration test.
+  - Add `exclude_root_certs` option.
+- ZFS (`zfs`): Pool detection and metrics gathering for ZFS 2.1.x.
+
+### Output plugin updates
+
+- AMQP (`amqp`): Check for nil client before closing.
+- ElasticSearch (`elasticsearch`):
+  - Implement NaN and Inf handling.
+  - Add bearer token support.
+- Graylog (`graylog`): Fix field prefixes.
+- Groundwork (`groundwork`):
+  - Set `NextCheckTime` to `LastCheckTime`.
+  - Update SDK and improve logging.
+  - Process group tag.
+- InfluxDB V2 (`influxdb_v2`): Include bucket name in error messages.
+- SQL (`sql`): Fix unsigned settings.
+- Stackdriver (`stackdriver`): Fix cumulative interval start times.
+- Syslog (`syslog`): Correctly set the trailer.
+- Timestream (`timestream`): Fix batching logic with write records and introduce concurrent requests.
+- Datadog (`datadog`): Add compression.
+- HTTP (`http`):
+  - Add optional list of non-retryable status codes.
+  - Support AWS managed service for Prometheus.
+- Websocket (`websocket`): Add `socks5` proxy support.
+- Wavefront (`wavefront`):
+  - Flush sender on error to clean up broken connections.
+  - Run `gofmt`.
+  - Fix panic if no MIBs folder is found.
+
+### Parser plugin updates
+
+- CSV (`csv`):
+  - Add empty import of `tzdata` for Windows binaries.
+  - Fix typo.
+- Ifname (`ifname`):
+  - Eliminate MIB dependency.
+  - Parallelism fix.
+- JSON V2 (`json_v2`):
+  - Allow optional paths and handle wrong paths correctly.
+  - Check if `gpath` exists and support optional in fields/tags.
+  - Fixes to timestamp setting.
+- Nagios (`nagios`): Use real error for logging.
+- XPath (`xpath`):
+  - Handle duplicate registration of protocol-buffer files gracefully.
+  - Fix typo.
+
+### Dependency updates
+
+- Update `github.com/Azure/azure-kusto-go` from 0.5.0 to 0.5.2.
+- Update `github.com/nats-io/nats-server/v2` from 2.7.3 to 2.7.4.
+- Update `github.com/Shopify/sarama` from 1.29.1 to 1.32.0.
+- Update `github.com/shirou/gopsutil/v3` from 3.21.12 to 3.22.2.
+- Update `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`.
+- Update `github.com/miekg/dns` from 1.1.43 to 1.1.46.
+- Update `github.com/aws/aws-sdk-go-v2/service/dynamodb`.
+- Update `github.com/nats-io/nats-server/v2` from 2.7.2 to 2.7.3.
+- Update `github.com/aws/aws-sdk-go-v2/config` from 1.8.3 to 1.13.1.
+- Update `github.com/testcontainers/testcontainers-go`.
+- Update `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`.
+- Update `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`.
+- Update `github.com/wavefronthq/wavefront-sdk-go` from 0.9.9 to 0.9.10.
+- Update `github.com/ClickHouse/clickhouse-go` from 1.5.1 to 1.5.4.
+- Update `k8s.io/api` from 0.23.3 to 0.23.4.
+- Update `cloud.google.com/go/pubsub` from 1.17.1 to 1.18.0.
+- Update `github.com/newrelic/newrelic-telemetry-sdk-go`.
+- Update `github.com/aws/aws-sdk-go-v2/service/dynamodb` from 1.5.0 to 1.13.0.
+- Update `github.com/sensu/sensu-go/api/core/v2` from 2.12.0 to 2.13.0.
+- Update `github.com/gophercloud/gophercloud` from 0.16.0 to 0.24.0.
+- Update `github.com/jackc/pgx/v4` from 4.14.1 to 4.15.0.
+- Update `github.com/aws/aws-sdk-go-v2/service/sts` from 1.7.2 to 1.14.0.
+- Update all `go.opentelemetry.io` dependencies.
+- Update `github.com/signalfx/golib/v3` from 3.3.38 to 3.3.43.
+- Update `github.com/aliyun/alibaba-cloud-sdk-go`.
+- Update `github.com/denisenkom/go-mssqldb` from 0.10.0 to 0.12.0.
+- Update `github.com/gopcua/opcua` from 0.2.3 to 0.3.1.
+- Update `github.com/nats-io/nats-server/v2` from 2.6.5 to 2.7.2.
+- Update `k8s.io/client-go` from 0.22.2 to 0.23.3.
+- Update `github.com/aws/aws-sdk-go-v2/service/kinesis` from 1.6.0 to 1.13.0.
+- Update `github.com/benbjohnson/clock` from 1.1.0 to 1.3.0.
+- Update `github.com/vmware/govmomi` from 0.27.2 to 0.27.3.
+- Update `github.com/prometheus/client_golang` from 1.11.0 to 1.12.1.
+- Update `go.mongodb.org/mongo-driver` from 1.7.3 to 1.8.3.
+- Update `github.com/google/go-cmp` from 0.5.6 to 0.5.7.
+- Update `go.opentelemetry.io/collector/model` from 0.39.0 to 0.43.2.
+- Update `github.com/multiplay/go-ts3` from 1.0.0 to 1.0.1.
+- Update `cloud.google.com/go/monitoring` from 0.2.0 to 1.2.0.
+- Update `github.com/vmware/govmomi` from 0.26.0 to 0.27.2.
+- Update `google.golang.org/api` from 0.54.0 to 0.65.0.
+- Update `github.com/antchfx/xmlquery` from 1.3.6 to 1.3.9.
+- Update `github.com/nsqio/go-nsq` from 1.0.8 to 1.1.0.
+- Update `github.com/prometheus/common` from 0.31.1 to 0.32.1.
+- Update `cloud.google.com/go/pubsub` from 1.17.0 to 1.17.1.
+- Update `github.com/influxdata/influxdb-observability/influx2otel` from 0.2.8 to 0.2.10.
+- Update `github.com/shirou/gopsutil/v3` from 3.21.10 to 3.21.12.
+- Update `github.com/jackc/pgx/v4` from 4.6.0 to 4.14.1.
+- Update `github.com/Azure/azure-event-hubs-go/v3` from 3.3.13 to 3.3.17.
+- Update `github.com/gosnmp/gosnmp` from 1.33.0 to 1.34.0.
+- Update `github.com/hashicorp/consul/api` from 1.9.1 to 1.12.0.
+- Update `github.com/antchfx/xpath` from 1.1.11 to 1.2.0.
+- Update `github.com/antchfx/jsonquery` from 1.1.4 to 1.1.5.
+- Update `github.com/prometheus/procfs` from 0.6.0 to 0.7.3.
+- Update `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs` from 1.5.2 to 1.12.0.
+- Update `github.com/kardianos/service` from 1.0.0 to 1.2.1.
+- Update `github.com/couchbase/go-couchbase` from 0.1.0 to 0.1.1.
+- Update `github.com/pion/dtls/v2` from 2.0.9 to 2.0.13.
+- Update `github.com/eclipse/paho.mqtt.golang` from 1.3.0 to 1.3.5.
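+
+The following is a minimal sketch of the collection offset feature referenced above, assuming the agent-level `collection_offset` setting this release introduces; the interval values are illustrative only:
+
+```toml
+[agent]
+  interval = "10s"           # collect from inputs every 10 seconds
+  collection_jitter = "0s"   # no random jitter in this sketch
+  collection_offset = "5s"   # shift each collection cycle 5 seconds into the interval
+```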
+
+## v1.21.4 [2022-2-16]
+
+- Update to Go 1.17.7 to address [three security issues](https://groups.google.com/g/golang-announce/c/SUsQn0aSgPQ/m/gx45t8JEAgAJ?pli=1) in the library.
+- Update all `go.opentelemetry.io` from 0.24.0 to 0.27.0.
+- Update `github.com/signalfx/golib/v3` from 3.3.38 to 3.3.43.
+- Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.61.1004 to 1.61.1483.
+- Update `github.com/denisenkom/go-mssqldb` from 0.10.0 to 0.12.0.
+- Update `github.com/gopcua/opcua` from 0.2.3 to 0.3.1.
+- Update `github.com/nats-io/nats-server/v2` from 2.6.5 to 2.7.2.
+- Update `k8s.io/client-go` from 0.22.2 to 0.23.3.
+- Update `github.com/aws/aws-sdk-go-v2/service/kinesis` from 1.6.0 to 1.13.0.
+- Update `github.com/benbjohnson/clock` from 1.1.0 to 1.3.0.
+- Update `github.com/Azure/azure-kusto-go` from 0.5.0 to 0.5.2.
+- Update `github.com/vmware/govmomi` from 0.27.2 to 0.27.3.
+- Update `github.com/prometheus/client_golang` from 1.11.0 to 1.12.1.
+- Update `go.mongodb.org/mongo-driver` from 1.7.3 to 1.8.3.
+- Update `github.com/google/go-cmp` from 0.5.6 to 0.5.7.
+- Update `go.opentelemetry.io/collector/model` from 0.39.0 to 0.43.2.
+- Update `github.com/multiplay/go-ts3` from 1.0.0 to 1.0.1.
+- Update `cloud.google.com/go/monitoring` from 0.2.0 to 1.2.0.
+- Update `github.com/vmware/govmomi` from 0.26.0 to 0.27.2.
+
+### Input plugin updates
+- Docker (`docker`): Update memory usage calculation.
+- ECS (`ecs`): Use current time as timestamp.
+- SNMP (`snmp`): Ensure folders do not get loaded more than once.
+- Windows Performance Counters (`win_perf_counters`): Add deprecation warning and version.
+
+### Output plugin updates
+- AMQP (`amqp`): Check for nil client before closing.
+- Azure Data Explorer (`azure_data_explorer`): Lower RAM usage.
+- ElasticSearch (`elasticsearch`): Add scheme to fix error in sniffing option.
+
+### Parser plugin updates
+- JSON v2 (`json_v2`):
+  - Fix timestamp change during execution.
+  - Fix incorrect handling of `timestamp_path`.
+  - Allow optional paths and handle wrong paths correctly.
+
+### Serializer updates
+- Prometheus serializer (`prometheusremotewrite`): Use correct timestamp unit.
+
+### New external plugins
+- [apt](https://github.com/x70b1/telegraf-apt/blob/master/README.md) (`telegraf-apt`) - Contributed by [@x70b1](https://github.com/x70b1).
+- [knot](https://github.com/x70b1/telegraf-knot/blob/master/README.md) (`telegraf-knot`) - Contributed by [@x70b1](https://github.com/x70b1).
+
+## v1.21.3 [2022-1-27]
+
+- Update `grpc` module to v1.44.0.
+- Update `google.golang.org/api` module from 0.54.0 to 0.65.0.
+- Update `antchfx/xmlquery` module from 1.3.6 to 1.3.9.
+- Update `nsqio/go-nsq` module from 1.0.8 to 1.1.0.
+- Update `prometheus/common` module from 0.31.1 to 0.32.1.
+- Update `cloud.google.com/go/pubsub` module from 1.17.0 to 1.17.1.
+- Update `influxdata/influxdb-observability/influx2otel` module from 0.2.8 to 0.2.10.
+- Update `shirou/gopsutil/v3` module from 3.21.10 to 3.21.12.
+- Update `jackc/pgx/v4` module from 4.6.0 to 4.14.1.
+- Update `Azure/azure-event-hubs-go/v3` module from 3.3.13 to 3.3.17.
+- Update `gosnmp/gosnmp` module from 1.33.0 to 1.34.0.
+- Update `hashicorp/consul/api` module from 1.9.1 to 1.12.0.
+- Update `antchfx/xpath` module from 1.1.11 to 1.2.0.
+- Update `antchfx/jsonquery` module from 1.1.4 to 1.1.5.
+- Update `prometheus/procfs` module from 0.6.0 to 0.7.3.
+- Update `aws/aws-sdk-go-v2/service/cloudwatchlogs` module from 1.5.2 to 1.12.0.
+- Update `kardianos/service` module from 1.0.0 to 1.2.1.
+- Update `couchbase/go-couchbase` module from 0.1.0 to 0.1.1.
+- Update `pion/dtls/v2` module from 2.0.9 to 2.0.13.
+- Update `containerd/containerd` module to 1.5.9.
+
+### Input plugin updates
+- Execd (`execd`): Resolve a Prometheus text format parsing error.
+- IPset (`ipset`): Prevent panic from occurring after startup.
+- OPC UA (`opcua`): Fix issue where fields were being duplicated.
+- HTTP (`http`): Prevent server side error message.
+- SNMP (`snmp`): Fix error when a MIBs folder doesn't exist.
+- SNMP Trap (`snmp_trap`): Fix translation of partially resolved OIDs.
+
+### Output plugin updates
+- AMQP (`amqp`): Update to avoid connection leaks.
+- Timestream (`timestream`):
+  - Fix an issue with batching logic of write records.
+  - Introduce concurrent requests.
+- Stackdriver (`stackdriver`): Send correct interval start times for all counter metrics.
+- Syslog (`syslog`): Correctly set the ASCII trailer per [RFC 6587](https://datatracker.ietf.org/doc/html/rfc6587).
+
+### Parser plugin updates
+- Nagios (`nagios`): Log correct errors when executing commands to aid in debugging.
+- JSON v2 (`json_v2`): Fix timestamp precision when using `unix_ns` timestamp format.
+- Wavefront (`wavefront`): Add missing setting `wavefront_disable_prefix_conversion`.
+
+## v1.21.2 [2022-1-5]
+
+- Add arm64 macOS builds for M1 devices.
+- Add RISC-V64 Linux builds.
+- Complete numerous changes to CircleCI config to ensure more timely completion and clearer execution flow.
+- Update `github.com/djherbis/times` module from v1.2.0 to v1.5.0.
+- Update `github.com/go-ldap/ldap/v3` module from v3.1.0 to v3.4.1.
+- Update `github.com/gwos/tcg/sdk` module to v0.0.0-20211223101342-35fbd1ae683c.
+
+### Input plugin updates
+- Disk (`disk`): Fix issue of missing disks when running Telegraf in a container.
+- DPDK (`dpdk`): Add a note to documentation about socket availability.
+- Logparser (`logparser`): Resolve panic in the logparser plugins due to a missing `Log`.
+- SNMP (`snmp`):
+  - Resolve panic due to a missing `gosmi` module.
+  - Resolve a panic by checking the index before assignment where a floating `::` exists.
+  - Resolve a panic when no MIBs folder was found.
+  - Ensure the module load order to avoid an SNMP marshal error.
+  - Now more accurately grabs MIB table columns.
+  - Networking no longer required during tests.
+- SNMP Trap (`snmp_trap`): Document deprecation of the `timeout` setting.
+
+### Parser plugin updates
+- CSV (`csv`): Use an empty import of `tzdata` to correctly set the time zone.
+
+## v1.21.1 [2021-12-16]
+
+### Bug fixes
+- Fix panic in parsers due to missing log.
+- Update `go-sensu` module to v2.12.0.
+- Fix typo in OpenStack input plugin.
+
+### Features
+- Add SMART input plugin concurrency configuration option, support for `nvme-cli` v1.14+, and lint fixes.
+
+## v1.21 [2021-12-15]
+
+{{% note %}}
+The signing for the RPM digest has changed to use sha256 to improve security. Due to this change, RPM builds might not be compatible with RHEL 6 and older releases. (Telegraf only supports RHEL releases that are in production.)
+{{% /note %}}
+
+- Restart Telegraf service if it's already running and upgraded via RPM.
+- Print loaded plugins and deprecations for the `--once` and `--test` flags.
+- Update `eclipse/paho.mqtt.golang` module from 1.3.0 to 1.3.5.
+- Shut down Telegraf gracefully when running as a Windows service.
+- Skip `knx_listener` when writing the sample configuration file.
+- Update `nats-server` to support OpenBSD.
+- Revert unintended corruption of the Makefile.
+- Filter client certificates by DNS names.
+- Update `etc/telegraf.conf` and `etc/telegraf_windows.conf`.
+- Add full metadata to configuration for `common.kafka`.
+- Update `google.golang.org/grpc` module from 1.39.1 to 1.40.0.
+
+### Input plugin updates
+- Cloudwatch (`cloudwatch`): Fix metrics collection.
+- CPU (`cpu`): Update `shirou/gopsutil` from v2 to v3.
+- Directory Monitor (`directory_monitor`):
+  - Fix handling when the data format is CSV and `csv_skip_rows > 0` and `csv_header_row_count >= 1`.
+  - Add the ability to create and name a tag containing the filename.
+- Elasticsearch Query (`elasticsearch_query`): Add debug query output.
+- HTTP Listener v2 (`http_listener_v2`): Fix panic on close by checking that Telegraf is closing.
+- Kubernetes Inventory (`kube_inventory`): Set TLS server name configuration properly.
+- Modbus (`modbus`): Update connection settings (serial).
+- MQTT Consumer (`mqtt_consumer`):
+  - Topic extraction no longer requires all three fields.
+  - Enable extracting tag values from MQTT topics.
+- OPC UA (`opcua`):
+  - Fix sudden closing of Telegraf.
+  - Allow user to select the source for the metric timestamp.
+- Prometheus (`prometheus`):
+  - Check error before defer.
+  - Add `ignore_timestamp` option.
+- Puppet (`puppetagent`): Add measurements from Puppet 5.
+- SNMP (`snmp`):
+  - Update snmp plugin to respect number of retries configured.
+  - Optimize locking for SNMP MIBs loading.
+  - Update to use `gosmi`.
+  - Remove `snmptranslate` from README and fix default path.
+  - Merge tables with different indexes.
+- StatsD (`statsd`): Fix parse error.
+- Sysstat (`sysstat`): Use unique temporary file.
+- Windows Performance Counters (`win_perf_counters`): Add setting to ignore localization.
+- Windows Services (`win_services`): Add exclude filter.
+- ZFS (`zfs`): Pool detection and metrics gathering for ZFS >= 2.1.x.
+
+### Output plugin updates
+- Register `bigquery` to all output plugins.
+- Azure Data Explorer (`azure_data_explorer`):
+  - Add option to skip table creation.
+  - Add `json_timestamp_layout` option.
+- ElasticSearch (`elasticsearch`): Implement NaN and Inf handling.
+- Graylog (`graylog`):
+  - Ensure Graylog spec fields are not prefixed with `_`.
+  - Fix failing test due to port already in use.
+  - Mute UDP/TCP tests by marking them as integration.
+  - Add TLS support and message format.
+  - Add TCP support.
+- HTTP (`http`): Add `use_batch_format`.
+- InfluxDB V2 (`influxdb_v2`): Add retry on 413 errors with InfluxDB output.
+- Wavefront (`wavefront`): Flush sender on error to clean up broken connections.
+
+### Parser plugin updates
+- XPath (`xpath`): Handle duplicate registration of protocol-buffer files gracefully.
+- JSON v2 (`json_v2`):
+  - Fix parser timestamp setting order.
+  - Remove dead code.
+  - Support defining field/tag tables within an object table.
+
+### Processor plugin updates
+- IfName (`ifname`):
+  - Eliminate MIB dependency.
+  - Parallelism fix.
+  - Add more details to log messages.
+- Starlark (`starlark`): Example for processing `sparkplug_b` messages.
+- RegEx (`regex`): Extend to allow renaming of measurements, tags, and fields.
+
+### Aggregator plugin updates
+- Implement deprecation infrastructure.
+- Add support for aggregators as Starlark scripts.
+
+### New plugins
+
+#### Inputs
+
+- [Intel PMU Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/intel_pmu/README.md) (`intel_pmu`) - Contributed by [@bkotlowski](https://github.com/bkotlowski).
+- [Logical Volume Manager Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/lvm/README.md) (`lvm`) - Contributed by [InfluxData](https://github.com/influxdata).
+- [OpenStack Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/openstack) (`openstack`) - Contributed by [@singamSrikar](https://github.com/singamSrikar).
+
+#### Outputs
+- [Azure Event Hubs Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/event_hubs/README.md) (`event_hubs`) - Contributed by [@tomconte](https://github.com/tomconte).
+- [GroundWork Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/groundwork/README.md) (`groundwork`) - Contributed by [@VladislavSenkevich](https://github.com/VladislavSenkevich).
+- [MongoDB Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/mongodb/README.md) (`mongodb`) - Contributed by [@bustedware](https://github.com/bustedware).
+
+#### Aggregators
+- [Starlark Aggregator](https://github.com/influxdata/telegraf/blob/master/plugins/aggregators/starlark/README.md) (`starlark`) - Contributed by [@essobedo](https://github.com/essobedo).
+
+## v1.20.4 [2021-11-17]
+
+- Update `BurntSushi/toml` from 0.3.1 to 0.4.1.
+- Update `gosnmp` module from 1.32 to 1.33.
+- Update `go.opentelemetry.io/otel` from v0.23.0 to v0.24.0.
+- Fix plugin linters.
+
+### Input plugin updates
+- Cisco Model-Driven Telemetry (`cisco_telemetry_mdt`): Move to new protobuf library.
+- InfluxDB (`influxdb`): Update input schema docs.
+- Intel RDT (`intel_rdt`): Correct the timezone of metrics gathered from the `pqos` tool to use the local timezone by default instead of UTC.
+- IPMI Sensor (`ipmi_sensor`): Redact passwords in log files to maintain security.
+- Modbus (`modbus`): Do not build on OpenBSD.
+- MySQL (`mysql`):
+  - Fix type conversion follow-up.
+  - Correctly set the default paths.
+- NVIDIA SMI (`nvidia_smi`): Correctly set the default paths.
+- Proxmox (`proxmox`): Parse the column types of the server status.
+- SQL Server (`sqlserver`): Add elastic pool in supported versions.
+
+### Output plugin updates
+- Loki (`loki`): Include the metric name as a label for improved query performance and metric filtering.
+
+## v1.20.3 [2021-10-28]
+
+- Update Go to 1.17.2.
+- Update `gjson` module to v1.10.2.
+- Update Snowflake database driver module to 1.6.2.
+- Update `github.com/apache/thrift` module from 0.14.2 to 0.15.0.
+- Update `github.com/aws/aws-sdk-go-v2/config` module from 1.8.2 to 1.8.3.
+- Update `github.com/Azure/azure-kusto-go` module from 0.3.2 to 0.4.0.
+- Update `github.com/docker/docker` module from 20.10.7+incompatible to 20.10.9+incompatible.
+- Update `github.com/golang-jwt/jwt/v4` module from 4.0.0 to 4.1.0.
+- Update `github.com/jaegertracing/jaeger` module from 1.15.1 to 1.26.0.
+- Update `github.com/prometheus/common` module from 0.26.0 to 0.31.1.
+
+### Input plugin updates
+- IPMI Sensor (`ipmi_sensor`): Redact IPMI password in logs.
+- Kube Inventory (`kube_inventory`):
+  - Do not skip resources with zero s/ns timestamps.
+  - Fix segfault in `ingress`, `persistentvolumeclaim`, and `statefulset`.
+- Procstat (`procstat`): Revert and fix tag creation.
+- SQL Server (`sqlserver`): Add integration tests.
+- Amazon CloudWatch (`cloudwatch`): Use the AWS SDK v2 library.
+- ZFS (`zfs`): Check return code of `zfs` command for FreeBSD.
+- Ethtool (`ethtool`): Add normalization of tags.
+- Internet Speed (`internet_speed`): Resolve missing latency field.
+- Prometheus (`prometheus`):
+  - Decode Prometheus scrape path from Kubernetes labels.
+  - Move err check to correct place.
+- Procstat (`procstat`): Correct conversion of int with specific bit size.
+- Webhooks (`webhooks`): Provide more fields.
+- MongoDB (`mongodb`): Solve compatibility issue when using a 5.x replica set.
+- Intel RDT (`intel_rdt`): Allow sudo usage.
+- MySQL (`mysql`): Fix inconsistent metric types.
+
+### Processor plugin updates
+- Starlark (`starlark`): Pop operation for non-existing keys.
+
+### New plugins
+
+#### External
+- [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2): Contributed by @sranka.
+- [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle): Contributed by @sranka.
+
+## v1.20.2 [2021-10-07]
+
+- Fix Makefile typo that prevented i386 tar and rpm packages from being built.
+
+### Input plugin updates
+- Cloudwatch (`cloudwatch`): Use new session API.
+- Stackdriver (`stackdriver`): Migrate to `cloud.google.com/go/monitoring/apiv3/v2`.
+
+### Parser plugin updates
+- JSON V2 (`json_v2`): Fix duplicated line protocol when using object and fields.
+- Influx (`influx`): Fix memory leak.
+
+## v1.20.1 [2021-10-06]
+
+- Fix output buffer never completely flushing.
+- Update `k8s.io/apimachinery` module to 0.22.2.
+- Update `consul` module to 1.11.0.
+- Update `github.com/testcontainers/testcontainers-go` module to 0.11.1.
+- Update `github.com/Azure/go-autorest/autorest/adal` module.
+- Update `github.com/Azure/go-autorest/autorest/azure/auth` module to 0.5.8.
+- Update `cloud.google.com/go/pubsub` module to 1.17.0.
+- Update `github.com/aws/smithy-go` module to 1.8.0.
+
+### Input plugin updates
+
+- Elasticsearch Query (`elasticsearch_query`): Add custom time/date format field.
+- OpenTelemetry (`opentelemetry`): Fix error returned to OpenTelemetry client.
+- Couchbase (`couchbase`): Fix insecure certificate validation.
+- MongoDB (`mongodb`): Fix panic due to nil dereference.
+- Intel RDT (`intel_rdt`): Prevent timeout when logging.
+- Procstat (`procstat`): Add missing tags.
+
+### Output plugin updates
+
+- Loki (`loki`): Update `http_headers` setting to match sample config.
+- MQTT (`mqtt`): Add "keep alive" config option and documentation around an issue with the `eclipse/mosquitto` version.
+
+## v1.20 [2021-09-16]
+
+- Update Go to 1.17.0.
+- Update `runc` module to v1.0.0-rc95.
+- Migrate `dgrijalva/jwt-go` to `golang-jwt/jwt/v4`.
+- Update `thrift` module to 0.14.2 and `zipkin-go-opentracing` to 0.4.5.
+- Update `cloud.google.com/go/pubsub` module to 1.15.0.
+- Update `github.com/tinylib/msgp` module to 1.1.6.
+
+### Input plugin updates
+
+- MongoDB (`mongodb`): Change command based on server version.
+- SQL (`sql`): Make timeout apply to single query.
+- Systemd Units (`systemd_units`): Add pattern support.
+- Cloudwatch (`cloudwatch`):
+  - Pull metrics from multiple AWS CloudWatch namespaces.
+  - Support AWS Web Identity Provider.
+- Modbus (`modbus`): Add support for RTU over TCP.
+- Procstat (`procstat`): Support cgroup globs and include `systemd` unit children.
+- Suricata (`suricata`): Support alert event type.
+- Prometheus (`prometheus`): Add ability to query Consul Service catalog.
+- HTTP Listener V2 (`http_listener_v2`): Allow multiple paths and add `path_tag`.
+- HTTP (`http`): Add cookie authentication.
+- Syslog (`syslog`): Add RFC 3164 support for BSD-style syslog messages.
+- Jenkins (`jenkins`): Add option to include nodes by name.
+- SNMP Trap (`snmp_trap`): Improve MIB lookup performance.
+- SMART (`smart`): Add power mode status.
+- New Relic (`newrelic`): Add option to override `metric_url`.
+
+### Output plugin updates
+
+- Dynatrace (`dynatrace`): Remove hardcoded int value.
+- InfluxDB v2 (`influxdb_v2`): Increase accepted `Retry-After` header values.
+- SQL (`sql`): Add bool datatype.
+- Prometheus Client (`prometheus_client`): Add landing page.
+- HTTP (`http`): Add cookie authentication.
+
+### Serializer plugin updates
+
+- Prometheus (`prometheus`): Update timestamps and expiration time as new data arrives.
+
+### Parser plugin updates
+
+- XPath (`xpath`): Add JSON, MessagePack, and Protocol-buffers format support.
+
+### New plugins
+
+#### Input
+
+- [Elasticsearch Query](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch_query) - Contributed by @lpic10
+- [Internet Speed Monitor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/internet_speed) - Contributed by @ersanyamarya
+- [mdstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mdstat) - Contributed by @johnseekins
+- [AMD ROCm System Management Interface (SMI)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amd_rocm_smi) - Contributed by @mconcas
+
+#### Output
+- [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentelemetry) - Contributed by @jacobmarble
+- [Azure Data Explorer](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/azure_data_explorer) - Contributed by @minwal
+
+## v1.19.3 [2021-08-19]
+
+- Update `sirupsen/logrus` module from 1.7.0 to 1.8.1.
+- Update `testcontainers/testcontainers-go` module from 0.11.0 to 0.11.1.
+- Update `golang/snappy` module from 0.0.3 to 0.0.4.
+- Update `aws/aws-sdk-go-v2` module from 1.3.2 to 1.8.0.
+- Update `sensu/go` module to v2.9.0.
+- Update `hashicorp/consul/api` module to 1.9.1.
+
+### Input plugin updates
+- Prometheus (`prometheus`): Fix Kubernetes pod discovery.
+- Redis (`redis`): Improve Redis commands documentation.
+- Clickhouse (`clickhouse`): Fix panic and improve handling of empty result sets.
+- OPC UA (`opcua`):
+  - Avoid closing session on a closed connection.
+  - Fix reconnection regression introduced in 1.19.1.
+  - Don't skip good-quality nodes after encountering a bad-quality node.
+- Kubernetes Inventory (`kube_inventory`): Fix k8s nodes and pods parsing error.
+- PostgreSQL (`postgresql`): Normalize unix socket path.
+- vSphere (`vsphere`): Update `vmware/govmomi` module to v0.26.0 in order to support vSphere 7.0.
+
+### Output plugin updates
+- Loki (`loki`): Sort logs by timestamp before writing to Loki.
+- CrateDB (`cratedb`): Replace dots in tag keys with underscores.
+
+### Processor plugin updates
+- AWS EC2 (`aws_ec2`): Refactor EC2 init.
+
+## v1.19.2 [2021-07-28]
+
+- Update Go to 1.16.6.
+- Linter fixes.
+- Update `dynatrace-metric-utils-go` module to v0.2.0.
+- Detect changes to configuration and reload Telegraf.
+
+### Input plugin updates
+- CGroup (`cgroup`): Allow for multiple keys when parsing cgroups.
+- Kubernetes (`kubernetes`): Update plugin to attach pod labels to the `kubernetes_pod_volume` and `kubernetes_pod_network` metrics.
+- Kubernetes Inventory (`kube_inventory`): Fix a segmentation fault when selector labels were not present on a persistent volume claim.
+- MongoDB (`mongodb`): Switch to official `mongo-go-driver` module to fix an SSL authentication failure.
+- NSQ Consumer (`nsq_consumer`): Fix a connection error when attempting to connect to an empty list of servers.
+- Prometheus (`prometheus`): Fix Prometheus cAdvisor authentication.
+- SQL (`sql`): Fix issue when handling a boolean column.
+- SQL Server (`sqlserver`):
+  - Add TempDB troubleshooting stats and missing v2 query metrics.
+  - Update to provide more detailed error messaging.
+- StatsD (`statsd`): Fix a regression that didn't allow integer percentiles.
+- x509 Certificate (`x509_cert`): Fix an issue where the plugin would hang indefinitely on a UDP connection.
+
+### Output plugin updates
+- Dynatrace Output (`dynatrace`):
+  - Update plugin to allow optional default dimensions.
+  - Fix a panic caused by an uninitialized `loggedMetrics` map.
+- InfluxDB (`influxdb`): Fix issue where metrics were reported as written but not actually written.
+
+### Processor plugin updates
+- IfName (`ifname`): Fix issue with SNMP empty metric name.
+
+### Parser plugin updates
+- JSON v2 (`json_v2`):
+  - Simplify how nesting is handled in the parser.
+  - Add support for large uint64 and int64 numbers.
+  - Fix an issue to handle nested objects in arrays properly.
+
+## v1.19.1 [2021-07-07]
+
+- Update `nats-server` module to v2.2.6.
+- Update `apimachinery` module to v0.21.1.
+- Update `jwt` module to v1.2.2 and `jwt-go` module to v3.2.3.
+- Update `couchbase` module to v0.1.0.
+- Update `signalfx` module to v3.3.34.
+- Update `gjson` module to v1.8.0.
+- Linter fixes.
+
+### Input plugin updates
+- SQL Server (`sqlserver`): Require authentication method to be specified.
+- Kube Inventory (`kube_inventory`): Fix segfault.
+- Couchbase (`couchbase`): Fix panic.
+- KNX (`knx_listener`): Fix nil pointer panic.
+- Procstat (`procstat`): Update `gopsutil` module to fix panic.
+- RabbitMQ (`rabbitmq`): Fix JSON unmarshal regression.
+- Dovecot (`dovecot`): Exclude read-timeout from being an error.
+- StatsD (`statsd`): Don't stop parsing after a parsing error.
+- SNMP (`snmp`): Add a check for OID and name to prevent empty metrics.
+- x509 Certificate (`x509_cert`):
+  - Fix `source` tag for HTTPS.
+  - Fix SNI support.
+
+### Output plugin updates
+- HTTP (`http`): Fix TOML error when parsing `insecure_skip_verify`.
+
+### Parser plugin updates
+- JSON v2 (`json_v2`): Don't require tags to be added to `included_keys`.
+
+## v1.19.0 [2021-06-17]
+
+- Update Go to 1.16.5.
+
+### Bug fixes
+- Update `pgx` to v4.
+- Fix reading configuration files starting with `http:`.
+- `serializers.prometheusremotewrite`: Update dependency and remove tags with empty values.
+- `outputs.kafka`: Don't prevent Telegraf from starting when there's a connection error.
+- `parsers.prometheusremotewrite`: Update Prometheus dependency to v2.21.0.
+- `outputs.dynatrace`: Use `dynatrace-metric-utils`.
+- Many linter fixes. (Thanks @zak-pawel and all!)
+
+### Features
+- Configuration file environment variable can now be a URL.
+- Add named timestamp formats.
+- Allow multiple `--config` and `--config-directory` flags.
+
+### Plugin updates
+
+#### Input plugin updates
+- (`aliyuncms`): Add configuration option for a list of regions to query.
+- (`cisco_telemetry_mdt`): Add support for events and class-based query.
+- (`cloudwatch`): Add wildcard support in dimensions configuration.
+- (`couchbase`): Add ~200 more Couchbase metrics via the buckets endpoint.
+- (`dovecot`): Add support for Unix domain sockets.
+- (`http_listener_v2`): Add support for snappy compression.
+- (`http`): Add OAuth2 to HTTP input.
+- (`kinesis_consumer`): Add `content_encoding` option with gzip and zlib support.
+- (`logstash`): Add support for version 7 queue statistics.
+- (`mongodb`): Optionally collect top statistics.
+- (`mysql`): Gather all MySQL channels.
+- (`ping`): Add an option to specify packet size.
+- (`sqlserver`): Add an optional health metric.
+- (`sqlserver`): Add `login_name`.
+- (`sqlserver`): Enable Azure Active Directory (AAD) authentication.
+- (`sqlserver`): Add service and save connection pools.
+- (`vsphere`): Add configuration option for the historical interval duration.
+- (`x509_cert`): Add wildcard support for certificate filenames.
+
+#### Output plugin updates
+- (`datadog`): Add HTTP proxy to DataDog output.
+- (`graphite`): Allow more characters in Graphite tags.
+
+#### Parser plugin updates
+- (`prometheusremotewrite`): Add Starlark script for renaming metrics.
+- (`value`): Add custom field name configuration option.
+
+#### Processor plugin updates
+- (`enum`): Support `float64`.
+- (`starlark`): Add an example showing how to obtain IOPS from `diskio` input.
+- (`starlark`): Add `math` module.
+- (`starlark`): Add `time` module.
+- (`starlark`): Support nanosecond resolution timestamp.
+- (`strings`): Add UTF-8 sanitizer.
+
+### New plugins
+
+#### Input
+- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - Contributed by @i-prudnikov
+- [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - Contributed by @p-zak
+- [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - Contributed by @DocLambda
+- [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - Contributed by @jacobmarble
+- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - Contributed by @srebhan
+
+#### Output
+- [AWS CloudWatch Logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - Contributed by @i-prudnikov
+- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - Contributed by @illuusio
+- [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - Contributed by @FZambia
+
+#### Parser
+- [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - Contributed by @influxdata
+- [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - Contributed by @influxdata
+
+#### External
+- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Contributed by @SLedunois
+- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Contributed by @machinly
+- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Contributed by @falon
+- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Contributed by @jcgonnard
+
+## v1.18.3 [2021-05-21]
+
+- Add FreeBSD ARMv7 build.
+- Dependencies:
+  - Migrate from `soniah/gosnmp` to `gosnmp/gosnmp` v1.32.0.
+  - Migrate from `docker/libnetwork/ipvs` to `moby/ipvs`.
+  - Migrate from `ericchiang/k8s` to `kubernetes/client-go`.
+  - Update `hashicorp/consul/api` module to v1.8.1.
+  - Update `shirou/gopsutil` to v3.21.3.
+  - Update `microsoft/ApplicationInsights-Go` to v0.4.4.
+  - Update `gogo/protobuf` to v1.3.2.
+  - Update `Azure/go-autorest/autorest/azure/auth` to v0.5.6 and `Azure/go-autorest/autorest` to v0.11.17.
+  - Update `collectd.org` to v0.5.0.
+  - Update `nats-io/nats.go` to v1.10.0.
+  - Update `golang/protobuf` to v1.5.1.
+
+### Input plugin updates
+
+- [Prometheus Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus): Add ability to set user agent when scraping Prometheus metrics.
+- [Kinesis Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kinesis_consumer): Fix repeating parser error.
+- [SQL Server Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver): Remove disallowed white space from `sqlServerRingBufferCPU` query.
+
+### Output plugin updates
+
+- [Elasticsearch Output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/elasticsearch/README.md): Add the ability to enable gzip compression.
+
+## v1.18.2 [2021-04-29]
+
+- Make JSON format compatible with nulls to ensure Telegraf successfully detects null values and returns an empty metric without error.
+- Update `common.shim` by changing `NewStreamParser` to accept larger inputs from scanner.
+
+### Input plugin updates
+
+- [APCUPSD Input](https://github.com/influxdata/telegraf/blob/release-1.18/plugins/inputs/apcupsd/README.md) (`apcupsd`):
+  Resolve an 'ALARMDEL' bug in a forked repository. This fix ensures the plugin works when the `no alarm` delay duration is set.
+- [NFS Client Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) (`nfsclient`): Update to successfully collect metrics other than read and write.
+- [SNMP Input](https://github.com/influxdata/telegraf/blob/release-1.18/plugins/inputs/snmp/README.md) (`snmp`): Update to log SNMPv3 auth failures.
+- [VMware vSphere Input](https://github.com/influxdata/telegraf/blob/release-1.18/plugins/inputs/vsphere/README.md) (`vsphere`): Add `MetricLookback` setting to handle reporting delays in vCenter 6.7 and later.
+- [OPC UA Client Input](https://github.com/influxdata/telegraf/blob/release-1.18/plugins/inputs/opcua/README.md) (`opcua`): Fix error handling.
+
+### Output plugin updates
+
+- [Sumo Logic Output](https://github.com/influxdata/telegraf/blob/release-1.18/plugins/outputs/sumologic/README.md) (`sumologic`): Add support to [sanitize the metric name](https://github.com/influxdata/telegraf/tree/release-1.18/plugins/serializers/carbon2#metric-name-sanitization) in the Carbon2 serializer.
+
+### Processor plugin updates
+
+- [Converter Processor](https://github.com/influxdata/telegraf/blob/release-1.18/plugins/processors/converter/README.md) (`converter`):
+  Add support for `float64` to support converting longer hexadecimal string values to a numeric type without losing precision. Note: if a string number exceeds the size limit for `float64`, precision may be lost.
+
+## v1.18.1 [2021-04-07]
+
+- Agent: Close running outputs when the agent reloads on SIGHUP.
+
+### Input plugin updates
+
+- [Docker Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker) (`docker`):
+  Fix panic when parsing container statistics.
+- [Exec Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (`exec`):
+  Fix truncated messages in debug mode; debug mode now shows full messages.
+- [IPMI Sensor Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor) (`ipmi_sensor`):
+  Fix panic by implementing a length check in the plugin.
+- [MySQL Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql) (`mysql`):
+  Fix the ability to handle the `binary logs` query for MySQL version 8.0+.
+- [NFS Client Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) (`nfsclient`):
+  Fix integer overflow in fields received by mountstat.
+- [Ping Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping) (`ping`):
+  Resolve error that prevented the agent from running when an unprivileged UDP ping was sent. Now, `SetPrivileged(true)` is always true in native mode to ensure a privileged ICMP ping is sent.
+- [SNMP Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp) (`snmp`):
+  Fix `init()` when no MIBs are installed.
+- [SQL Server Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (`sqlserver`):
+  Fix `sqlserver_process_cpu` calculation.
+- [Tail Input](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) (`tail`):
+  Add configurable option to override the `path` tag.
+
+### Output plugin updates
+
+- [Azure Monitor Output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/azure_monitor) (`azure_monitor`):
+  Fix error handling when initializing the authentication object.
+- [Yandex Cloud Monitoring Output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/yandex_cloud_monitoring) (`yandex_cloud_monitoring`):
+  Use the correct computed metadata URL to get `folder-id`.
+
+### Processor plugin updates
+
+- [ifName](https://github.com/influxdata/telegraf/tree/master/plugins/processors/ifname) (`ifname`):
+  Retrieve interface name more efficiently.
+
+## v1.18 [2021-3-17]
+
+### Features
+- Update to Go 1.16.2.
+- Add code signing for Windows and macOS.
+- Add more SNMPv3 authentication protocols, including SHA-512 (see the sample configuration after this list).
+- Add support for the [DataDog distributions](https://docs.datadoghq.com/metrics/distributions/#counting-distribution-metrics) metric type.
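+
+The following is a minimal `inputs.snmp` sketch using one of the newly supported SNMPv3 authentication protocols; the agent address, security name, and password are placeholders to replace with your own values:
+
+```toml
+[[inputs.snmp]]
+  agents = ["udp://192.0.2.10:161"]   # placeholder agent address
+  version = 3
+  sec_name = "telegraf"               # placeholder SNMPv3 security name
+  sec_level = "authNoPriv"            # authenticate, but do not encrypt
+  auth_protocol = "SHA512"            # one of the protocols added in this release
+  auth_password = "example-password"  # placeholder credential
+```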
+
+### New plugins
+
+#### Inputs
+- [Beat](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/beat) (`beat`) - Contributed by [@nferch](https://github.com/nferch)
+- [CS:GO](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) (`csgo`) - Contributed by [@oofdog](https://github.com/oofdog)
+- [Directory Monitoring](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) (`directory_monitor`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [NFS](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) (`nfsclient`) - Contributed by [@pmoranga](https://github.com/pmoranga)
+- [RavenDB](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) (`ravendb`) - Contributed by [@ml054](https://github.com/ml054) and [@bartoncasey](https://github.com/bartoncasey)
+
+#### Outputs
+- [Grafana Loki](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/loki) (`loki`) - Contributed by [@eraac](https://github.com/eraac)
+- [Sensu](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sensu) (`sensu`) - Contributed by [@calebhailey](https://github.com/calebhailey)
+- [SignalFX](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) (`signalfx`) - Contributed by [@keitwb](https://github.com/keitwb)
+
+#### External
+- [GeoIP](https://github.com/a-bali/telegraf-geoip) (`geoip`) - Contributed by [@a-bali](https://github.com/a-bali)
+- [Plex Webhook](https://github.com/russorat/telegraf-webhooks-plex) (`plex`) - Contributed by [@russorat](https://github.com/russorat)
+- [SMCIPMI](https://github.com/jhpope/smc_ipmi) (`smc_ipmi`) - Contributed by [@jhpope](https://github.com/jhpope)
+
+#### Aggregators
+- [Derivative](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative) (`derivative`) - Contributed by [@KarstenSchnitter](https://github.com/karstenschnitter)
+- [Quantile](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) (`quantile`) - Contributed by [@srebhan](https://github.com/srebhan)
+
+#### Processors
+- [AWS EC2 Metadata](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) (`aws_ec2`) - Contributed by [@pmalek-sumo](https://github.com/pmalek-sumo)
+
+#### Parsers
+- [XML](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) (`xml`) - Contributed by [@srebhan](https://github.com/srebhan)
+
+#### Serializers
+- [MessagePack](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) (`msgpack`) - Contributed by [@dialogbox](https://github.com/dialogbox)
+
+## v1.17.3 [2021-2-17]
+
+- Update to Go 1.15.8.
+
+### Input plugin updates
+
+- Filestat (`filestat`): Skip missing files.
+- MQTT Consumer (`mqtt_consumer`): Fix reconnection issues.
+- Ping (`ping`):
+  - Fix a timeout for the `deadline` configuration.
+  - Update README with the correct command for native ping on Linux.
+  - Fix percentile calculations.
+- SNMP (`snmp`): Add support to expose IPv4/IPv6 as connection schemes.
+- x509 Certificate (`x509_cert`): Fix a timeout issue.
+
+### Output plugin updates
+
+- InfluxDB v1.x (`influxdb`): Validate InfluxDB response after creating a database to avoid JSON parsing errors.
+- Warp10 (`warp10`): Add support for commas in tags by URL encoding them.
+
+### Miscellaneous fixes and updates
+
+- Telegraf configuration file (`telegraf.conf`): Resolve issue reading `flush_jitter` output.
+- Library updates:
+  - Update `github.com/gopcua/opcua` to 0.1.13.
+  - Update `go-ping` to the latest version.
+
+## v1.17.2 [2021-1-28]
+
+### Input plugin updates
+- `ping`:
+  - Add support for specifying the interface in native mode using either the name or IP address.
+  - Resolve regression from 1.17.1 by adding back a missing function.
+
+## v1.17.1 [2021-1-27]
+
+### Features
+- Add Event Log support for Windows platforms.
+- Allow specifying SNI hostnames in `common.tls`.
+
+### Input plugin updates
+- `csv`:
+  - Add ability to define an array of string skip values.
+  - Address issue of ignoring missing values.
+- `gnmi`: Metric path no longer has its leading character truncated.
+- `http_listener_v2`: Fix an issue with `stop()` when the plugin fails to start.
+- `ipmi_sensor`:
+  - Add setting to enable caching.
+  - Add `hex_key` parameter.
+- `jenkins`: Add support for an inclusive job list.
+- `lustre2`: No longer crashes if the field name and value are not separated.
+- `ping`: Use the `go-ping` library when `method = "native"` in the configuration.
+- `prometheus`: Use MIME type to handle protocol-buffer responses.
+- `procstat`:
+  - Provide an option to include core count when reporting `cpu_usage`.
+  - Use the same timestamp for all metrics in the same `Gather()` cycle.
+- `postgresql_extensible`: Add timestamp column option to handle log-like queries.
+- `snmp`: Extend the internal SNMP wrapper to support AES-192, AES-192C, AES-256, and AES-256C.
+- `webhooks`: Use the `measurement` JSON field from the Particle.io webhook as the measurement name.
+- `x509_cert`: Fix a timeout issue.
+- `zookeeper`: Improve `mntr` regex to match user-specific keys.
+
+### Output plugin updates
+
+- `http`: Add option to control idle connection timeout.
+- `influxdb_v2`:
+  - Log is no longer flooded with errors when the Elasticsearch receiver is in a read-only state.
+  - Add exponential backoff and respect client error responses.
+
+### Aggregator plugin updates
+- `merge`: Performance optimizations.
+
+## v1.17.0 [2020-12-17]
+
+### Features
+- Update Go to 1.15.5.
+- Add support for Linux/ppc64le.
+
+### New plugins
+
+#### Inputs
+
+- [Intel Powerstat](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/intel_powerstat/README.md) (`intel_powerstat`)
+- [Riemann Listener](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/riemann_listener/README.md) (`riemann`)
+
+#### Outputs
+
+- [Logz.io](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/logzio/README.md) (`logzio`)
+- [Yandex Cloud Monitoring](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/yandex_cloud_monitoring/README.md) (`yandex_cloud_monitoring`)
+
+#### Output data formats (serializers)
+
+- [Prometheus Remote Write](https://github.com/influxdata/telegraf/blob/master/plugins/serializers/prometheusremotewrite/README.md) (`prometheusremotewrite`)
+
+#### Parsers
+
+- [Prometheus](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/prometheus/README.md) (`prometheus`)
+
+### Input plugin updates
+
+- `aerospike`: Fix edge case where an unexpected hex string was converted to an integer if all digits.
+- `bcache`: Fix tests for Windows.
+- `bind`: Add configurable timeout.
+- `carbon2`: Fix tests.
+- `ecs`: Remove duplicated field from `ecs_task`.
+- `execd`: Add support for new lines in line protocol fields.
+- `github`: Add query of pull request statistics.
+- `graphite`: Parse tags.
+- `http`: Add proxy support.
+- `http_response`: Fix network test.
+- `jenkins`: Add build number field to `jenkins_job` measurement.
+- `kafka_consumer`: Enable `zstd` compression and idempotent writes.
+- `kube_inventory`:
+  - Fix issue with missing metrics when a pod has only pending containers.
+  - Update string parsing of allocatable CPU cores.
+- `modbus`: Add FLOAT64-IEEE support.
+- `monit`: Add `response_time`.
+- `mysql`: Add per-user metrics.
+- `mqtt_consumer`: Fix issue with concurrent map write.
+- `opcua`: Add node groups.
+- `ping`:
+  - Add percentiles.
+  - Fix potential issue with race condition.
+- `snmp`:
+  - Add support for converting hex strings to integers.
+  - Translate field values.
+- `socket_listener`: Fix crash when receiving invalid data.
+- `sqlserver`:
+  - Add tags for monitoring readable secondaries for Azure SQL MI.
+  - Add SQL Server HA/DR Availability Group queries.
+  - Remove duplicate column (`session_db_name`).
+  - Add column `measurement_db_type` to output of all queries if not empty.
+- `statsd`: Add configurable max TTL duration.
+- `vsphere`: Fix spelling of datacenter check.
+- `win_services`: Add glob pattern matching.
+- `zfs`: Add dataset metrics.
+
+### Output plugin updates
+
+- `kafka`: Enable `zstd` compression and idempotent writes.
+- `nats`: Add `name` parameter.
+
+### Processor plugin updates
+
+- `starlark`: Can now store state between runs using a global state variable.
+
+## v1.16.3 [2020-12-01]
+
+### Features
+- Update `godirwalk` to 1.16.1 for DragonFly BSD support.
+
+### Input plugin updates
+- CSV Parser (`csv`): Fix issue where CSV timestamp was being read as Unix instead of Go reference time.
+- gNMI (`gnmi`): Add logging of `SubscribeResponse_Error` response types.
+- NVIDIA SMI (`nvidia_smi`): Add driver and CUDA version.
+- PHP-FPM (`phpfpm`): Fix issue with "index out of range" error.
+- SQL Server (`sqlserver`): Fix typo in `database_name` column.
+
+### Output plugin updates
+- Wavefront (`wavefront`):
+  - Distinguish between retryable and non-retryable errors.
+  - Add debug-level logging for metric data that is not retryable.
+
+### Processor plugin updates
+- Starlark (`starlark`):
+  - Allow the processor to manage errors that occur in the `apply` function.
+  - Add support for logging.
+  - Add capability to return multiple metrics.
+
+## v1.16.2 [2020-11-13]
+
+### Input plugin updates
+
+- CSV Parser (`csv`): Fix parsing multiple CSV files with different headers.
+- DC/OS (`dcos`): Fix high-severity vulnerability in the previous version of the `jwt-go` library.
+- gNMI (`gnmi`): Add support for bytes encoding for gNMI messages.
+- Proxmox (`proxmox`):
+  - Fix a few issues with error reporting.
+  - Now ignores QEMU templates.
+- RAS (`ras`): Fix tests failing on some systems.
+- Redfish (`redfish`): Fix a parsing issue.
+- SMART (`smart`): Fix an issue recognizing all devices from the configuration.
+- SQL Server (`sqlserver`): Fix an issue with errors in on-premises instance queries.
+- Systemd Units (`systemd_units`): Add `--plain` to the command invocation to fix reporting of errors for units that are not found.
+- vSphere (`vsphere`):
+  - Fix how metrics were counted.
+  - Fix metrics being skipped in certain specific circumstances.
+
+### Output plugin updates
+
+- Dynatrace (`dynatrace`): Fix pushing metrics to separate Dynatrace environments.
+- Wavefront (`wavefront`): Add `immediate_flush` flag (see the sketch below).
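+
+A minimal `outputs.wavefront` sketch showing the new flag; the proxy URL is a placeholder, and the flag name is assumed to match the option as released:
+
+```toml
+[[outputs.wavefront]]
+  url = "http://wavefront-proxy.example.com:2878"  # placeholder Wavefront proxy address
+  immediate_flush = true  # flush the sender after each batch instead of buffering internally
+```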
+
+## v1.16.1 [2020-10-28]
+
+### Input plugin updates
+
+- Apache Kafka Consumer (`kafka_consumer`): Add Kafka SASL-mechanism authentication support for SCRAM-SHA-256, SCRAM-SHA-512, and GSSAPI.
+- Microsoft SQL Server (`sqlserver`):
+  - Fix a syntax error in Azure queries.
+  - Remove synthetic performance counters that no longer exist from the `sqlserver_performance_counters` measurement.
+  - Add a new tag (`sql_version_desc`) to identify the readable SQL Server version.
+- RAS (`ras`):
+  - Disable on specific Linux architectures (mips64, mips64le, ppc64le, riscv64).
+  - Fix an issue to properly close file handlers.
+- Processes (`processes`): Fix an issue with receiving a `no such file or directory` stat error.
+- Windows Performance Counters (`win_perf_counters`): Fix an issue with the counter where a negative denominator error would cause gathering operations to fail.
+
+### Output plugin updates
+
+- Apache Kafka (`kafka`): Add Kafka SASL-mechanism authentication support for SCRAM-SHA-256, SCRAM-SHA-512, and GSSAPI.
+
+## v1.16.0 [2020-10-21]
+
+### New plugins
+
+#### Inputs
+
+- [InfluxDB v2 Listener Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/influxdb_v2_listener/README.md) (`influxdb_v2_listener`) - Contributed by [@magichair](https://github.com/magichair)
+- [Intel RDT Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/intel_rdt/README.md) (`intel_rdt`) - Contributed by [@p-zak](https://github.com/p-zak)
+- [NSD Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nsd/README.md) (`nsd`) - Contributed by [@gearnode](https://github.com/gearnode)
+- [OPC UA Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/opcua/README.md) (`opcua`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Proxmox Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/proxmox/README.md) (`proxmox`) - Contributed by [@effitient](https://github.com/effitient)
+- [RAS Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ras/README.md) (`ras`) - Contributed by [@p-zak](https://github.com/p-zak)
+- [Windows Eventlog Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_eventlog/README.md) (`win_eventlog`) - Contributed by [@simnv](https://github.com/simnv)
+
+#### Outputs
+
+- [Dynatrace Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/dynatrace/README.md) (`dynatrace`) - Contributed by [@thschue](https://github.com/thschue)
+- [Sumo Logic Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sumologic/README.md) (`sumologic`) - Contributed by [@pmalek-sumo](https://github.com/pmalek-sumo)
+- [Timestream Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/timestream) (`timestream`) - Contributed by [@piotrwest](https://github.com/piotrwest)
+
+#### External
+
+- [Amazon Cloudwatch Alarms Input Plugin](https://github.com/vipinvkmenon/awsalarms) (`awsalarms`) - Contributed by [@vipinvkmenon](https://github.com/vipinvkmenon)
+- [YouTube Input Plugin](https://github.com/inabagumi/youtube-telegraf-plugin) (`youtube`) - Contributed by [@inabagumi](https://github.com/inabagumi)
+- [Octoprint Input Plugin](https://github.com/sspaink/octoprint-telegraf-plugin) (`octoprint`) - Contributed by [@sspaink](https://github.com/sspaink/)
+- [Systemd Timings Input Plugin](https://github.com/pdmorrow/telegraf-execd-systemd-timings) (`systemd-timings`) - Contributed by [@pdmorrow](https://github.com/pdmorrow)
+
+### Input plugin updates
+
+- `aerospike`: Add set and histogram reporting.
+- `agent`:
+  - Send metrics in FIFO order.
+  - Fix issue with `execd restart_delay` being ignored.
+  - Sort plugin name lists for output.
+- `clickhouse`: Add additional metrics.
+- `cloudwatch`: Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter.
+- `consul`: Add `metric_version` flag.
+- `docker`: Fix vulnerabilities found in BDBA scan.
+- `execd`: Fix issue with `restart_delay` being ignored.
+- `gnmi`: Fix issue where the next message after a send returned EOF.
+- `http_listener_v2`: Make header tags case-insensitive.
+- `http_response`: Match on status code.
+- `jenkins`: Fix multiple escaping at certain folder depths.
+- `kubernetes`: Add missing error check for HTTP requirement failure.
+- `modbus`: Extend support of fixed point values on input.
+- `mongodb`: Add pages written from cache metric.
+- `net`: Fix broken link to `proc.c`.
+- `snmp`: Add agent host tag configuration option.
+- `smart`: Add missing NVMe attributes.
+- `sqlserver`:
+  - Add `database_type` config option to split up SQL queries by engine type.
+  - Fix query mapping.
+  - Refactor and reformat queries.
+  - Add more performance counters.
+- `tail`:
+  - Close file to ensure it has been flushed.
+  - Fix following on EOF.
+
+### Output plugin updates
+
+- `elasticsearch`: Add `force_document_id` option to enable resending data and avoid duplicated documents.
+- `opentsdb`: Skip NaN and Inf JSON values.
+
+### Processor plugin updates
+
+- `execd`: Increase the maximum serialized metric size in line protocol.
+- `ifname`: Add `addTag` debugging.
+- `starlark`: Add JSON parsing support.
+
+### Bug fixes
+
+- Fix `darwin` package build flags.
+- `shim`:
+  - Fix bug with loading plugins with no config.
+  - Logger improvements.
+  - Fix issue with loading processor config from `execd`.
+- Initialize aggregation processors.
+- Fix arch name in `deb/rpm` builds.
+- Fix RPM `/var/log/telegraf` permissions.
+- Fix `docker-image make` target.
+- Remove Event field from `serializers.splunkmetric`.
+- Fix panic on streaming processors using logging.
+- Fix `ParseError.Error` panic in `parsers.influx`.
+- Fix `procstat` performance regression.
+- Fix serialization when using `carbon2`.
+- Fix bugs found by LGTM analysis platform.
+- Update to Go 1.15.2.
+
+## v1.15.3 [2020-09-11]
+
+### Features
+- `processors.starlark`:
+  - Improve the quality of docs by executing them as tests.
+  - Add pivot example.
+- `outputs.application_insights`: Add ability to set the endpoint URL.
+- `inputs.sqlserver`: Add new counter: Lock Timeouts (timeout > 0)/sec.
+
+### Bug fixes
+
+- `agent`: Fix minor error message race condition.
+- `build`: Update Dockerfiles to Go 1.14.
+- `shim`:
+  - Fix bug in logger affecting `AddError`.
+  - Fix issue with `config.Duration`.
+- `inputs.eventhub_consumer`: Fix string to int conversion.
+- `inputs.http_listener_v2`: Make HTTP header tags case-insensitive.
+- `inputs.modbus`: Extend support of fixed point values.
+- `inputs.ping`: Fix issue for FreeBSD's ping6.
+- `inputs.vsphere`: Fix missing cluster name.
+- `outputs.opentsdb`: Fix JSON handling of values `NaN` and `Inf`.
+
+## v1.15.2 [2020-07-31]
+
+### Bug fixes
+- Fix RPM `/var/log/telegraf` permissions.
+- Fix tail following on EOF.
+
+## v1.15.1 [2020-07-22]
+
+### Bug fixes
+
+- Fix architecture in non-amd64 deb and rpm packages.
+
+## v1.15.0 [2020-07-22]
+
+{{% warn %}}
+A critical bug that impacted non-amd64 packages was introduced in 1.15.0. **Do not install this release.** Instead, install 1.15.1, which includes the features, new plugins, and bug fixes below.
+{{% /warn %}}
+
+### Breaking changes
+
+Breaking changes are updates that may cause Telegraf plugins to fail or function incorrectly. If you have one of the following plugins installed, make sure to update your plugin as needed:
+
+- **Logparser** (`logparser`) input plugin: Deprecated. Use the `tail` input with `data_format = "grok"` as a replacement.
+- **Cisco GNMI Telemetry** (`cisco_telemetry_gnmi`) input plugin: Renamed to `gnmi` to better reflect its general support for gNMI devices.
+- **Splunkmetric** (`splunkmetric`) serializer: Several fields used primarily for debugging have been removed. If you are making use of these fields, they can be added back with the `tag` option.
+
+### New plugins
+
+#### Inputs
+
+- [NGINX Stream STS Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx_sts/README.md) (`nginx_sts`) - Contributed by [@zdmytriv](https://github.com/zdmytriv)
+- [Redfish Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redfish/README.md) (`redfish`) - Contributed by [@sarvanikonda](https://github.com/sarvanikonda)
+
+#### Outputs
+
+- [Execd Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/execd/README.md) (`execd`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [New Relic Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/newrelic/README.md) (`newrelic`) - Contributed by @hsingkalsi
+
+#### Processors
+
+- [Defaults Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/defaults/README.md) (`defaults`) - Contributed by [@jregistr](https://github.com/jregistr)
+- [Execd Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/execd/README.md) (`execd`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Filepath Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/filepath/README.md) (`filepath`) - Contributed by [@kir4h](https://github.com/kir4h)
+- [Network Interface Name Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/ifname/README.md) (`ifname`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Port Name Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/port_name/README.md) (`port_name`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Reverse DNS Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/reverse_dns/README.md) (`reverse_dns`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Starlark Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) (`starlark`) - Contributed by [@influxdata](https://github.com/influxdata)
+
+### Features
+
+- Telegraf's `--test` mode runs processors and aggregators before printing metrics.
+- Official packages built with Go 1.14.5.
+- When updating the Debian package, you will no longer be prompted to merge the `telegraf.conf` file. Instead, the new version will be installed to `/etc/telegraf/telegraf.conf.sample`.
+- The `tar` and `zip` packages now include the version in the top-level directory.
+- Allow per-input overriding of `collection_jitter` and `precision`.
+- Deploy Telegraf configuration as `telegraf.conf.sample`.
+- Use Docker log timestamp as metric time.
+- Apply ping deadline to DNS lookup.
+- Support multiple templates for graphite serializers.
+- Add configurable separator to the graphite serializer and output.
+- Add support for SIGUSR1 to trigger flush.
+- Add support for `--once` mode that writes to outputs and exits.
+- Add timezone configuration to CSV parser.
+
+#### Input plugin updates
+
+- **Ceph Storage** (`ceph`): Add support for MDS and RGW sockets.
+- **ECS** (`ecs`): Add v3 metadata support.
+- **Fibaro** (`fibaro`): Add support for battery-level monitoring.
+- **File** (`file`):
+  - Support UTF-16.
+  - Exclude `csv_timestamp_column` and `csv_measurement_column` from fields.
+- **HTTP** (`http`): Add ability to read a bearer token.
+- **HTTP Listener v2** (`http_listener_v2`): Add ability to specify HTTP headers as tags.
+- **HTTP Response** (`http_response`):
+  - Add authentication support.
+  - Allow collection of HTTP headers.
+  - Add ability to collect response body as field.
+- **Icinga 2** (`icinga2`):
+  - Fix source field.
+  - Add tag for server hostname.
+- **InfluxDB Listener** (`influxdb_listener`): Add option to save retention policy as tag.
+- **IPtables** (`iptables`): Extract target as a tag for each rule.
+- **Kibana** (`kibana`): Fix `json unmarshal` error.
+- **Kubernetes Inventory** (`kube_inventory`): Add ability to add selectors as tags.
+- **Mem** (`mem`): Add `laundry` field on FreeBSD.
+- **Microsoft SQL Server** (`sqlserver`):
+  - Add `VolumeSpace` query.
+  - Add `cpu` query.
+  - Add counter type to `perfmon` collector.
+  - Improve compatibility with older server versions.
+  - Fix typo in `total_elapsed_time_ms` field.
+- **Modbus** (`modbus`):
+  - Add support for 64-bit integer types.
+  - Add retry when replica is busy.
+  - Add ability to specify measurement per register.
+- **MongoDB** (`mongodb`):
+  - Add commands stats.
+  - Add additional fields.
+  - Add cluster state integer.
+  - Add option to disable cluster status.
+  - Add additional concurrent transaction information.
+- **NVIDIA SMI** (`nvidia_smi`): Add video codec stats.
+- **Procstat** (`procstat`):
+  - Improve performance.
+  - Fix memory leak.
+- **S.M.A.R.T.** (`smart`): Add missing NVMe attributes.
+- **SNMP Trap** (`snmp_trap`): Add SNMPv3 trap support.
+- **System** (`system`): Fix incorrect uptime when clock is adjusted.
+- **Tail** (`tail`): Support UTF-16.
+
+#### Output plugin updates
+
+- **Wavefront** (`wavefront`): Add `truncate_tags` setting.
+
+#### Processor plugin updates
+
+- **Date** (`date`):
+  - Add field creation.
+  - Add integer unix time support.
+- **Enum** (`enum`): Add integer mapping support.
+
+### Bug fixes
+- Fix ability to write metrics to CloudWatch with IMDSv1 disabled.
+- Fix vSphere 6.7 missing data issue.
+- Fix gzip support in `socket_listener` with TCP sockets.
+- Fix interval drift when `round_interval` is set in agent.
+- Fix incorrect uptime when clock is adjusted.
+- Remove trailing backslash from tag keys/values in `influx` serializer.
+- Fix incorrect Azure SQL DB server properties.
+- Send metrics in FIFO order.
+
+## v1.14.5 [2020-06-30]
+
+### Bug fixes
+
+- Improve the performance of the `procstat` input.
+- Fix ping exit code handling on non-Linux operating systems.
+- Fix errors in output of the `sensors` command.
+- Prevent startup when tags have incorrect type in configuration file.
+- Fix panic with GJSON multiselect query in JSON parser.
+- Allow any key usage type on x509 certificate.
+- Allow histograms and summary types without buckets or quantiles in `prometheus_client` output.
+
+## v1.14.4 [2020-06-09]
+
+### Bug fixes
+
+- Fix the `cannot insert the value NULL` error with the `PerformanceCounters` query in the `sqlserver` input plugin.
+- Fix a typo in the naming of the `gc_cpu_fraction` field in the `influxdb` input plugin.
+- Fix a numeric to bool conversion in the `converter` processor.
+- Fix an issue with the `influx` stream parser blocking when data is in the buffer.
+
+## v1.14.3 [2020-05-19]
+
+### Bug fixes
+
+- Use same timestamp for all objects in arrays in the `json` parser.
+- Handle multiple metrics with the same timestamp in `dedup` processor.
+- Fix reconnection of timed out HTTP2 connections in `influxdb` outputs.
+- Fix negative value parsing in `ipmi_sensor` input.
+
+## v1.14.2 [2020-04-28]
+
+### Bug fixes
+
+- Trim whitespace from instance tag in `sqlserver` input.
+- Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call.
+- Fix limit on dimensions in `azure_monitor` output.
+- Fix 64-bit integer to string conversion in `snmp` input.
+- Fix shard indices reporting in `elasticsearch` input plugin.
+- Ignore fields with Not a Number or Infinity floats in the JSON serializer.
+- Fix typo in name of `gc_cpu_fraction` field of the `kapacitor` input.
+- Don't retry database creation when using `database_tag` if forbidden by the server in `influxdb` output.
+- Allow CR and FF inside of string fields in InfluxDB line protocol parser.
+
+## v1.14.1 [2020-04-14]
+
+### Bug fixes
+
+- Fix `PerformanceCounter` query performance degradation in `sqlserver` input.
+- Fix error when using the `Name` field in template processor.
+- Fix export timestamp not working for Prometheus with `metric_version=2`.
+- Fix exclusion of database and retention policy tags.
+- Fix status path when using globs in `phpfpm`.
+
+## v1.14 [2020-03-26]
+
+### Breaking changes
+
+Breaking changes are updates that may cause Telegraf plugins to fail or function incorrectly. If you have one of the following plugins installed, make sure to update your plugin as needed:
+
+- **Microsoft SQL Server** (`sqlserver`) input plugin: Renamed the `sqlserver_azurestats` measurement to `sqlserver_azure_db_resource_stats` to resolve an issue where numeric metrics were previously being reported incorrectly as strings.
+- **Date** (`date`) processor plugin: Now uses the UTC timezone when creating its tag. Previously, the local time was used.
+
+{{% note %}}
+Support for SSL v3.0 is deprecated in this release.
+Telegraf now uses the [Go TLS library](https://golang.org/pkg/crypto/tls/).
+{{% /note %}}
+
+### New plugins
+
+#### Inputs
+
+- [Arista LANZ Consumer](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/inputs/lanz/README.md) (`lanz`) - Contributed by [@timhughes](https://github.com/timhughes)
+- [ClickHouse](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/inputs/clickhouse/README.md) (`clickhouse`) - Contributed by [@kshvakov](https://github.com/kshvakov)
+- [Execd](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/inputs/execd/README.md) (`execd`) - Contributed by [@jgraichen](https://github.com/jgraichen)
+- [Event Hub Consumer](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/eventhub_consumer/README.md) (`eventhub_consumer`) - Contributed by [@R290](https://github.com/R290)
+- [InfiniBand](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/infiniband/README.md) (`infiniband`) - Contributed by [@willfurnell](https://github.com/willfurnell)
+- [Modbus](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/modbus/README.md) (`modbus`) - Contributed by [@garciaolais](https://github.com/garciaolais)
+- [Monit](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/monit/README.md) (`monit`) - Contributed by [@SirishaGopigiri](https://github.com/SirishaGopigiri)
+- [SFlow](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/sflow/README.md) (`sflow`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Wireguard](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/wireguard/README.md) (`wireguard`) - Contributed by [@LINKIWI](https://github.com/LINKIWI)
+
+#### Processors
+
+- [Dedup](https://github.com/influxdata/telegraf/blob/master/plugins/processors/dedup/README.md) (`dedup`) - Contributed by [@igomura](https://github.com/igomura)
+- [S2 Geo](https://github.com/influxdata/telegraf/blob/master/plugins/processors/s2geo/README.md) (`s2geo`) - Contributed by [@alespour](https://github.com/alespour)
+- [Template](https://github.com/influxdata/telegraf/blob/master/plugins/processors/template/README.md) (`template`) - Contributed by [@RobMalvern](https://github.com/RobMalvern)
+
+#### Outputs
+
+- [Warp10](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/warp10/README.md) (`warp10`) - Contributed by [@aurrelhebert](https://github.com/aurrelhebert)
+
+### Features
+
+#### Input plugin updates
+
+- **Apache Kafka Consumer** (`kafka_consumer`): Add SASL version control to support Microsoft Azure Event Hub.
+- **Apcupsd** (`apcupsd`): Add new tag `model` and new metrics: `battery_date`, `nominal_input_voltage`, `nominal_battery_voltage`, `nominal_power`, `firmware`.
+- **Cisco GNMI Telemetry** (`cisco_telemetry_gnmi`) input plugin:
+  - Add support for gNMI DecimalVal type.
+  - Replace dash (`-`) with underscore (`_`) when handling embedded tags.
+- **DiskIO** (`diskio`): Add counters for merged reads and writes.
+- **IPMI Sensor** (`ipmi_sensor`): Add `use_sudo` option.
+- **Jenkins** (`jenkins`):
+  - Add `source` and `port` tags to `jenkins_job` metrics.
+  - Add new fields `total_executors` and `busy_executors`.
+- **Kubernetes** (`kubernetes`): Add ability to collect pod labels.
+- **Microsoft SQL Server** (`sqlserver`):
+  - Add RBPEX IO statistics to DatabaseIO query.
+  - Add space on disk for each file to DatabaseIO query.
+  - Calculate DB Name instead of GUID in `physical_db_name`.
+  - Add `DatabaseIO` TempDB per Azure DB.
+  - Add `query_include` option for explicitly including queries.
+  - Add `volume_mount_point` to DatabaseIO query.
+- **MongoDB** (`mongodb`):
+  - Add `page_faults` for WiredTiger storage engine.
+  - Add latency statistics.
+  - Add replica set tag (`rs_name`).
+- **NATS Consumer** (`nats_consumer`): Add support for credentials file.
+- **NGINX Plus API** (`nginx_plus_api`): Add support for new endpoints.
+- **OpenLDAP** (`openldap`): Add support for MDB database information.
+- **PHP-FPM** (`phpfpm`): Allow globs in FPM unix socket paths (`unixsocket`).
+- **Procstat** (`procstat`): Add process `created_at` time.
+- **Prometheus** (`prometheus`) input plugin: Add `label` and `field` selectors for Kubernetes service discovery.
+- **RabbitMQ** (`rabbitmq`): Add `slave_nodes` and `synchronized_slave_nodes` metrics.
+- **StatsD** (`statsd`): Add UDP internal metrics.
+- **Unbound** (`unbound`): Expose [`-c cfgfile` option of `unbound-control`](https://linux.die.net/man/8/unbound-control) and set the default unbound configuration (`config_file = "/etc/unbound/unbound.conf"`) in the Telegraf configuration file.
+- **VMware vSphere** (`vsphere`): Add option to exclude resources by inventory path, including `vm_exclude`, `host_exclude`, `cluster_exclude` (for both clusters and datastores), and `datacenter_exclude`.
+- **X.509 Certificate** (`x509_cert`): Add `server_name` override.
+
+#### Output plugin updates
+
+- **Apache Kafka** (`kafka`): Add `topic_tag` and `exclude_topic_tag` options.
+- **Graylog** (`graylog`): Allow a user-defined field (`short_message_field`) to be used as the GELF `short_message`.
+- **InfluxDB v1.x** (`influxdb`): Add support for setting the retention policy using a tag (`retention_policy_tag`).
+- **NATS Output** (`nats`): Add support for credentials file.
+
+#### Aggregator plugin updates
+
+- **Histogram** (`histogram`): Add non-cumulative histogram.
+
+#### Processor plugin updates
+
+- **Converter** (`converter`): Add support for converting `tag` or `field` to `measurement`.
+- **Date** (`date`): Add date offset and timezone options.
+- **Strings** (`strings`): Add support for titlecase transformation.
+
+### Bug fixes
+
+- Fix Telegraf log rotation to use actual file size instead of bytes written.
+- Fix internal Telegraf metrics to prevent output split into multiple lines.
+- **Chrony** (`chrony`) input plugin: Only search for `chronyc` when the plugin is enabled.
+- **Microsoft SQL Server** (`sqlserver`) input plugin:
+  - Fix conversion to floats in AzureDBResourceStats query.
+  - Fix case-sensitive collation.
+  - Fix several issues with DatabaseIO query.
+  - Fix schedulers query compatibility with pre-2016 SQL Server versions.
+- **InfluxDB Listener** (`influxdb_listener`):
+  - Fix request failing with EOF.
+  - Continue parsing after error.
+  - Set headers on ping URL.
+
+## v1.13.4 [2020-02-25]
+
+### Release Notes
+Official packages now built with Go 1.13.8.
+
+### Bug fixes
+- Parse NaN values from summary types in Prometheus (`prometheus`) input plugin.
+- Fix PgBouncer (`pgbouncer`) input plugin when used with newer PgBouncer versions.
+- Support up to 8192 stats in the Ethtool (`ethtool`) input plugin.
+- Fix performance counters collection on named instances in Microsoft SQL Server (`sqlserver`) input plugin.
+- Use add time for Prometheus expiration calculation.
+- Fix inconsistency with input error counting in Telegraf v1.x (`internal`) input plugin.
+- Use the same timestamp per call if no time is provided in Prometheus (`prometheus`) input plugin.
+
+## v1.13.3 [2020-02-04]
+
+### Bug fixes
+
+- Update Kibana (`kibana`) input plugin to support Kibana 6.4 and later.
+- Prevent duplicate `TrackingIDs` from being returned in the following queue consumer input plugins:
+  - Amazon Kinesis Consumer (`kinesis_consumer`)
+  - AMQP Consumer (`amqp_consumer`)
+  - Apache Kafka Consumer (`kafka_consumer`)
+  - MQTT Consumer (`mqtt_consumer`)
+  - NATS Consumer (`nats_consumer`)
+  - NSQ Consumer (`nsq_consumer`)
+- Increase support for up to 4096 statistics in the Ethtool (`ethtool`) input plugin.
+- Remove expired metrics from the Prometheus Client (`prometheus_client`) output plugin. Previously, expired metrics were only removed when new metrics were added.
+
+## v1.13.2 [2020-01-21]
+
+### Bug fixes
+
+- Warn without error when Processes (`processes`) input is started on Windows.
+- Only parse certificate blocks in X.509 Certificate (`x509_cert`) input plugin.
+- Add custom attributes for all resource types in VMware vSphere (`vsphere`) input plugin.
+- Support URL agent address form with UDP in SNMP (`snmp`) input plugin.
+- Record device fields in the SMART (`smart`) input plugin when `attributes` is `false`.
+- Remove invalid timestamps from Kafka messages.
+- Update `json` parser to fix `json_strict` option and set the default to `true`.
+
+## v1.13.1 [2020-01-08]
+
+### Bug fixes
+- Fix issue where the ServerProperty query stopped working on Azure after failover.
+- Add leading period to OID in SNMP v1 generic traps.
+- Fix missing config fields in prometheus serializer.
+- Fix panic on connection loss with undelivered messages in MQTT Consumer
+  (`mqtt_consumer`) input plugin.
+- Encode query hash fields as hex strings in SQL Server (`sqlserver`) input plugin.
+- Invalidate diskio cache if the metadata mtime has changed.
+- Show platform not supported warning only on plugin creation.
+- Fix issue where `rabbitmq` could not complete a gather after a request error.
+- Fix `/sbin/init --version` being executed on Telegraf startup.
+- Use last path element as field key if path fully specified in Cisco GNMI Telemetry
+  (`cisco_telemetry_gnmi`) input plugin.
+
+## v1.13 [2019-12-12]
+
+### Release Notes
+Official packages built with Go 1.13.5.
+The Prometheus Format (`prometheus`) input plugin and Prometheus Client (`prometheus_client`)
+output have a new mapping to and from Telegraf metrics, which can be enabled by setting `metric_version = 2`.
+The original mapping is deprecated. When both plugins have the same setting,
+passthrough metrics are unchanged.
+Refer to the [Prometheus input plugin](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/prometheus/README.md)
+for details about the mapping.
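+
+For example, a minimal sketch of enabling the new mapping on both sides; the scrape URL and listen address below are placeholders, not defaults:
+
+```toml
+# Hedged sketch: both plugins set metric_version = 2 so passthrough metrics are unchanged.
+[[inputs.prometheus]]
+  urls = ["http://localhost:9100/metrics"]  # placeholder scrape target
+  metric_version = 2
+
+[[outputs.prometheus_client]]
+  listen = ":9273"  # placeholder listen address
+  metric_version = 2
+```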
+
+### New Inputs
+- [Azure Storage Queue](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/azure_storage_queue/README.md)
+  (`azure_storage_queue`) - Contributed by [@mjiderhamn](https://github.com/mjiderhamn)
+- [Ethtool](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/ethtool/README.md)
+  (`ethtool`) - Contributed by [@philippreston](https://github.com/philippreston)
+- [SNMP Trap](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/snmp_trap/README.md)
+  (`snmp_trap`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Suricata](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/suricata/README.md)
+  (`suricata`) - Contributed by [@satta](https://github.com/satta)
+- [Synproxy](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/synproxy/README.md)
+  (`synproxy`) - Contributed by [@rfrenayworldstream](https://github.com/rfrenayworldstream)
+- [Systemd Units](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/systemd_units/README.md)
+  (`systemd_units`) - Contributed by [@benschweizer](https://github.com/benschweizer)
+
+### New Processors
+- [Clone](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/processors/clone/README.md)
+  (`clone`) - Contributed by [@adrianlzt](https://github.com/adrianlzt)
+
+### New Aggregators
+- [Merge](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/aggregators/merge/README.md)
+  (`merge`) - Contributed by [@influxdata](https://github.com/influxdata)
+
+### Features
+- Add per-node memory stats to RabbitMQ (`rabbitmq`) input plugin.
+- Add ability to read query from file to PostgreSQL (`postgresql_extensible`) input plugin.
+- Add replication metrics to the Redis (`redis`) input plugin.
+- Support NX-OS telemetry extensions in Cisco Model-driven Telemetry (`cisco_telemetry_mdt`)
+  input plugin.
+- Allow `graphite` parser to create `Inf` and `NaN` values.
+- Use base-prefix detection for integers in `grok` parser.
+- Add more performance counter metrics to Microsoft SQL Server (`sqlserver`) input plugin.
+- Add millisecond unix time support to `grok` parser.
+- Add container ID as optional source tag to Docker (`docker`) and Docker Log
+  (`docker_log`) input plugins.
+- Add `lang` parameter to OpenWeatherMap (`openweathermap`) input plugin.
+- Log file open errors at debug level in Tail (`tail`) input plugin.
+- Add timeout option to Amazon CloudWatch (`cloudwatch`) input plugin.
+- Support custom success codes in HTTP (`http`) input plugin.
+- Improve IPVS (`ipvs`) input plugin error strings and logging.
+- Add strict mode to JSON parser that can be disabled to ignore invalid items.
+- Add support for Kubernetes 1.16 and remove deprecated API usage.
+- Add gathering of RabbitMQ federation link metrics.
+- Add bearer token defaults for Kubernetes plugins.
+- Add support for SNMP over TCP.
+- Add support for per-output flush jitter.
+- Add a nameable file tag to File (`file`) input plugin.
+- Add Splunk MultiMetric support.
+- Add support for sending HTTP Basic Auth in InfluxDB (`influxdb`) input plugin.
+- Add ability to configure the `url` tag in the Prometheus Format (`prometheus`) input plugin.
+- Add Prometheus `metric_version=2` mapping to internal metrics/line protocol.
+- Add Prometheus `metric_version=2` support to Prometheus Client (`prometheus_client`) output plugin.
+- Add `content_encoding` compression support to Socket Listener (`socket_listener`) input plugin.
+- Add high resolution metrics support to Amazon CloudWatch (`cloudwatch`) output plugin.
+- Add `SReclaimable` and `SUnreclaim` to Memory (`mem`) input plugin.
+- Allow multiple certificates per file in X.509 Certificate (`x509_cert`) input plugin.
+- Add additional tags to the X.509 Certificate (`x509_cert`) input plugin.
+- Add batch data format support to File (`file`) output plugin.
+- Support partition assignment strategy configuration in Apache Kafka Consumer
+  (`kafka_consumer`) input plugin.
+- Add node type tag to MongoDB (`mongodb`) input plugin.
+- Add `uptime_ns` field to MongoDB (`mongodb`) input plugin.
+- Support resolution of symlinks in Filecount (`filecount`) input plugin.
+- Set message timestamp to the metric time in Apache Kafka (`kafka`) output plugin.
+- Add base64decode operation to Strings (`strings`) processor.
+- Add option to control collecting global variables to MySQL (`mysql`) input plugin.
+
+### Bug fixes
+- Show correct default settings in MySQL (`mysql`) sample configuration.
+- Use `1h` or `3h` rain values as appropriate in OpenWeatherMap (`openweathermap`) input plugin.
+- Fix `not a valid field` error in Windows with NVIDIA SMI (`nvidia_smi`) input plugin.
+- Fix InfluxDB (`influxdb`) output serialization on connection closed.
+- Fix issue where ping skipped remaining hosts after a DNS lookup error.
+- Log MongoDB oplog auth errors at debug level.
+- Remove trailing underscore trimming from json flattener.
+- Revert change causing CPU usage to be capped at 100 percent.
+- Accept any media type in the Prometheus Format (`prometheus`) input plugin.
+- Fix unix socket dial arguments in uWSGI (`uwsgi`) input plugin.
+- Replace colon characters in Prometheus (`prometheus_client`) output labels with `metric_version=1`.
+- Set `TrimLeadingSpace` when `TrimSpace` is on in CSV (`csv`) parser.
+
+## v1.12.6 [2019-11-19]
+
+### Bug fixes
+- Fix many plugin errors logged at debug logging level.
+- Use nanosecond precision in Docker Log (`docker_log`) input plugin.
+- Fix interface option with `method = native` in Ping (`ping`) input plugin.
+- Fix panic in MongoDB (`mongodb`) input plugin if shard connection pool stats are unreadable.
+
+## v1.12.5 [2019-11-12]
+
+### Bug fixes
+- Fix incorrect results in Ping (`ping`) input plugin.
+- Add missing character replacement to `sql_instance` tag.
+- Change `no metric` error message to `debug` level in CloudWatch (`cloudwatch`) input plugin.
+- Add missing `ServerProperties` query to SQLServer (`sqlserver`) input plugin documentation.
+- Fix MongoDB `connections_total_created` field loading.
+- Fix metric creation when node is offline in Jenkins (`jenkins`) input plugin.
+- Fix Docker `uptime_ns` calculation when container has been restarted.
+- Fix MySQL field type conflict in conversion of `gtid_mode` to an integer.
+- Fix MySQL field type conflict with `ssl_verify_depth` and `ssl_ctx_verify_depth`.
+
+## v1.12.4 [2019-10-23]
+
+- Build official packages with Go 1.12.12.
+
+### Bug fixes
+- Fix metric generation with Ping (`ping`) input plugin `native` method.
+- Exclude the alias tag from plugin internal stats if unset.
+- Fix `socket_mode` option in PowerDNS Recursor (`powerdns_recursor`) input plugin.
+
+## v1.12.3 [2019-10-07]
+
+- Build official packages with Go 1.12.10.
+
+### Bug fixes
+- Use batch serialization format in Exec (`exec`) output plugin.
+- Use case-insensitive serial number match in S.M.A.R.T. (`smart`) input plugin.
+- Add authorization header only when environment variable is set.
+- Fix issue when running multiple MySQL and SQL Server plugin instances.
+- Fix database routing on retry with `exclude_database_tag`.
+- Fix logging panic in Exec (`exec`) input plugin with Nagios data format.
+
+## v1.12.2 [2019-09-24]
+
+### Bug fixes
+- Fix timestamp format detection in `csv` and `json` parsers.
+- Apcupsd input (`apcupsd`)
+  - Fix parsing of `BATTDATE`.
+- Keep boolean values listed in `json_string_fields`.
+- Disable Go plugin support in official builds.
+- Cisco GNMI Telemetry input (`cisco_telemetry_gnmi`)
+  - Fix path handling issues.
+
+## v1.12.1 [2019-09-10]
+
+### Bug fixes
+- Fix dependencies on GLIBC_2.14 symbol version.
+- Filecount input (`filecount`)
+  - Fix filecount for paths with trailing slash.
+- Icinga2 input (`icinga2`)
+  - Convert check state to an integer.
+- Apache Kafka Consumer input (`kafka_consumer`)
+  - Fix `could not mark message delivered` error.
+- MongoDB input (`mongodb`)
+  - Skip collection stats when disabled.
+- HTTP Response input (`http_response`)
+  - Fix error reading closed response body.
+- Apcupsd input (`apcupsd`)
+  - Fix documentation to reflect plugin behavior.
+- InfluxDB v2 output (`influxdb_v2`)
+  - Display retry log message only when a retry-after value is received.
+
+## v1.12 [2019-09-03]
+
+### Release Notes
+- The cluster health-related fields in the Elasticsearch input have been split out
+  from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices`
+  measurement because they were originally combined in error.
+
+### New Inputs
+- [Apcupsd](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/apcupsd/README.md) (`apcupsd`) - Contributed by @jonaz
+- [Docker Log](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker_log/README.md) (`docker_log`) - Contributed by @prashanthjbabu
+- [Fireboard](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/fireboard/README.md) (`fireboard`) - Contributed by @ronnocol
+- [Logstash](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logstash/README.md) (`logstash`) - Contributed by @lkmcs @dmitryilyin @arkady-emelyanov
+- [MarkLogic](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/marklogic/README.md) (`marklogic`) - Contributed by @influxdata
+- [OpenNTPD](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/openntpd/README.md) (`openntpd`) - Contributed by @aromeyer
+- [uWSGI](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/uwsgi) (`uwsgi`) - Contributed by @blaggacao
+
+### New Parsers
+- [From Urlencoded](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/form_urlencoded) (`form_urlencoded`) - Contributed by @byonchev
+
+### New Processors
+- [Date](https://github.com/influxdata/telegraf/blob/master/plugins/processors/date/README.md) (`date`) - Contributed by @influxdata
+- [Pivot](https://github.com/influxdata/telegraf/blob/master/plugins/processors/pivot/README.md) (`pivot`) - Contributed by @influxdata
+- [Tag Limit](https://github.com/influxdata/telegraf/blob/master/plugins/processors/tag_limit/README.md) (`tag_limit`) - Contributed by @memory
+- [Unpivot](https://github.com/influxdata/telegraf/blob/master/plugins/processors/unpivot/README.md) (`unpivot`) - Contributed by @influxdata
+
+### New Outputs
+- [Exec](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/exec/README.md) (`exec`) - Contributed by @Jaeyo
+
+### Features
+- Improve performance of `wavefront` serializer.
+- Allow `regex` processor to append tag values.
+- Add `starttime` field to `phpfpm` input.
+- Add cluster name tag to elasticsearch indices.
+- Add support for interface field in `http_response` input plugin.
+- Add container `uptime_ns` in `docker` input plugin.
+- Add better user-facing errors for API timeouts in docker input.
+- Add TLS mutual auth support to `jti_openconfig_telemetry` input.
+- Add support for ES 7.x to `elasticsearch` output.
+- Add basic auth to `prometheus` input plugin.
+- Add node roles tag to `elasticsearch` input.
+- Support floats in `statsd` percentiles.
+- Add native Go ping method to `ping` input plugin.
+- Resume from last known offset in `tail` input when reloading Telegraf.
+- Add improved support for Azure SQL Database to `sqlserver` input.
+- Add extra attributes for NVMe devices to `smart` input.
+- Add `docker_devicemapper` measurement to `docker` input plugin.
+- Add basic auth support to `elasticsearch` input.
+- Support string field glob matching in `json` parser.
+- Update gjson to allow multipath syntax in `json` parser.
+- Add support for collecting SQL Requests to identify waits and blocking to `sqlserver` input.
+- Collect k8s endpoints, ingress, and services in `kube_inventory` plugin.
+- Add support for field/tag keys to `strings` processor.
+- Add certificate verification status to `x509_cert` input.
+- Support percentage value parsing in `redis` input.
+- Load external Go plugins from `--plugin-directory`.
+- Add ability to exclude db/bucket tag from `influxdb` outputs.
+- Gather per-collection stats in `mongodb` input plugin.
+- Add TLS & credentials configuration for `nats_consumer` input plugin.
+- Add support for enterprise repos to `github` plugin.
+- Add Indices stats to `elasticsearch` input.
+- Add `left` function to `strings` processor.
+- Add grace period for metrics late for aggregation.
+- Add `diff` and `non_negative_diff` to `basicstats` aggregator.
+- Add device tags to `smart_attributes`.
+- Collect `framework_offers` and `allocator` metrics in `mesos` input.
+- Add Telegraf and Go version to the `internal` input plugin.
+- Update the number of logical CPUs dynamically in `system` plugin.
+- Add darwin (macOS) builds to the release.
+- Add configurable timeout setting to `smart` input.
+- Add `memory_usage` field to `procstat` input plugin.
+- Add support for custom attributes to `vsphere` input.
+- Add `cmdstat` metrics to `redis` input.
+- Add `content_length` metric to `http_response` input plugin.
+- Add `database_tag` option to `influxdb_listener` to add database from query string.
+- Add capability to limit TLS versions and cipher suites.
+- Add `topic_tag` option to `mqtt_consumer` (see the sketch after this version's bug fixes).
+- Add ability to label inputs for logging.
+- Add TLS support to `nginx_plus`, `nginx_plus_api` and `nginx_vts`.
+
+### Bug fixes
+- Fix issue where a sensor read error stopped reporting of all sensors in `temp` input.
+- Fix double pct replacement in `sysstat` input.
+- Fix race in master node detection in `elasticsearch` input.
+- Fix SSPI authentication not working in `sqlserver` input.
+- Fix memory error panic in `mqtt` input.
+- Support Kafka 2.3.0 consumer groups.
+- Fix persistent session in `mqtt_consumer`.
+- Fix finder inconsistencies in `vsphere` input.
+- Fix parsing multiple metrics on the first line of a tailed file.
+- Send TERM to `exec` processes before sending KILL signal.
+- Query oplog only when connected to a replica set.
+- Use environment variables to locate Program Files on Windows.
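+
+As referenced above, a minimal sketch of the new `topic_tag` option on the MQTT Consumer input; the broker address and topic filter are placeholders:
+
+```toml
+# Hedged sketch: record the source MQTT topic in a tag named "topic".
+[[inputs.mqtt_consumer]]
+  servers = ["tcp://localhost:1883"]  # placeholder broker address
+  topics = ["sensors/#"]              # placeholder topic filter
+  topic_tag = "topic"
+  data_format = "influx"
+```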
+
+## v1.11.5 [2019-08-27]
+
+### Bug fixes
+- Update `go-sql-driver/mysql` driver to 1.4.1 to address auth issues.
+- Return error status from `--test` if input plugins produce an error.
+- Fix issue where only the last configuration was used when running multiple `smart` input instances.
+- Build official packages with Go 1.12.9.
+- Split out `-w` argument in `iptables` input plugin.
+- Add support for parked process state on Linux.
+- Remove leading slash from rcon command.
+- Allow jobs with dashes in the name in `lustre2` input plugin.
+
+## v1.11.4 [2019-08-06]
+
+### Bug fixes
+
+#### Plugins
+- Kubernetes input (`kubernetes`)
+  - Correct typo in `logsfs_available_bytes` field.
+- Datadog output (`datadog`)
+  - Skip floats that are `NaN` or `Inf`.
+- Socket Listener input (`socket_listener`)
+  - Fix reload panic.
+
+## v1.11.3 [2019-07-23]
+
+### Bug fixes
+
+#### Agent
+
+- Treat empty array as successful parse in JSON parser.
+- Fix template pattern partial wildcard matching.
+
+#### Plugins
+
+- Bind input (`bind`)
+  - Add missing `rcode` and `zonestat`.
+- GitHub input (`github`)
+  - Fix panic.
+- Lustre2 input (`lustre2`)
+  - Fix config parse regression.
+- NVIDIA SMI input (`nvidia_smi`)
+  - Handle unknown error.
+- StatsD input (`statsd`)
+  - Fix panic when processing Datadog events.
+- VMware vSphere input (`vsphere`)
+  - Fix unable to reconnect after vCenter reboot.
+
+## v1.11.2 [2019-07-09]
+
+### Bug fixes
+
+#### Plugins
+
+- Bind input (`bind`)
+  - Fix `value out of range` error on 32-bit systems.
+- Burrow input (`burrow`)
+  - Apply topic filter to partition metrics.
+- Filecount input (`filecount`)
+  - Fix path separator handling in Windows.
+- Logparser input (`logparser`)
+  - Fix plugin no longer working after reload.
+- Ping input (`ping`)
+  - Fix source address ping flag on BSD.
+- StatsD input (`statsd`)
+  - Fix panic with empty Datadog tag string.
+- Tail input (`tail`)
+  - Fix plugin no longer working after reload.
+
+## v1.11.1 [2019-06-25]
+
+### Bug fixes
+
+#### Agent
+
+- Fix panic if `pool_mode` column does not exist.
+- Add missing `container_id` field to `docker_container_status` metrics.
+- Add `device`, `serial_no`, and `wwn` tags to synthetic attributes.
+
+#### Plugins
+
+- Cisco GNMI Telemetry input (`cisco_telemetry_gnmi`)
+  - Omit keys when creating measurement names for GNMI telemetry.
+- Disk input (`disk`)
+  - Fix `mount_points` option not being settable.
+- NGINX Plus API input (`nginx_plus_api`)
+  - Skip 404 error reporting.
+- Procstat input (`procstat`)
+  - Don't consider `pid` of `0` when using systemd lookup.
+- StatsD input (`statsd`)
+  - Fix parsing of remote TCP address.
+- System input (`system`)
+  - Ignore error when `utmp` is missing.
+
+## v1.11.0 [2019-06-11]
+
+### Release Notes
+
+- System (`system`) input plugin
+  - The `uptime_format` field has been deprecated; use the `uptime` field instead.
+- Amazon Cloudwatch Statistics (`cloudwatch`) input plugin
+  - Updated to use a more efficient API and now requires `GetMetricData` permissions
+    instead of `GetMetricStatistics`. The `units` tag is not
+    available from this API and is no longer collected.
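+
+For reference, a hedged sketch of one way to drop the deprecated field while dashboards migrate to `uptime`, using the standard `fielddrop` metric-filtering option; the plugin needs no other settings:
+
+```toml
+# Hedged sketch: filter out the deprecated field during migration.
+[[inputs.system]]
+  fielddrop = ["uptime_format"]
+```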
+
+### New input plugins
+
+- [BIND 9 Nameserver Statistics (`bind`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek
+- [Cisco GNMI Telemetry (`cisco_telemetry_gnmi`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx
+- [Cisco Model-driven Telemetry (`cisco_telemetry_mdt`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/cisco_telemetry_mdt/README.md) - Contributed by @sbyx
+- [ECS (`ecs`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/ecs/README.md) - Contributed by @rbtr
+- [GitHub (`github`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/github/README.md) - Contributed by @influxdata
+- [OpenWeatherMap (`openweathermap`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/openweathermap/README.md) - Contributed by @regel
+- [PowerDNS Recursor (`powerdns_recursor`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje
+
+### New aggregator plugins
+
+- [Final (`final`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/aggregators/final/README.md) - Contributed by @oplehto
+
+### New output plugins
+
+- [Syslog (`syslog`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/outputs/syslog/README.md) - Contributed by @javicrespo
+- [Health (`health`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/outputs/health/README.md) - Contributed by @influxdata
+
+### New output data formats (serializers)
+
+- [wavefront](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck
+
+### Features
+
+#### Agent
+
+- Add CLI support for outputting sections of the configuration.
+- Add `service-display-name` option for use with Windows service.
+- Add support for log rotation.
+- Allow environment variable `${}` expansion syntax in the configuration file.
+- Allow devices option to match against devlinks.
+
+#### Input data formats
+
+- Nagios
+  - Add support for multi-line text and perfdata.
+
+#### Input plugins
+
+- AMQP Consumer (`amqp_consumer`)
+  - Support passive queue declaration.
+  - Add support for gzip compression.
+- Amazon Cloudwatch Statistics (`cloudwatch`)
+  - Use more efficient GetMetricData API to collect Cloudwatch metrics.
+  - Allow selection of collected statistic types in cloudwatch input.
+- Apache Solr (`solr`)
+  - Add support for HTTP basic auth.
+- Hddtemp (`hddtemp`)
+  - Add source tag.
+- InfluxDB Listener (`influxdb_listener`)
+  - Support verbose query parameter in ping endpoint.
+- NVIDIA SMI (`nvidia_smi`)
+  - Extend metrics collected from NVIDIA GPUs.
+- Net (`net`)
+  - Speed up interface stat collection.
+- PHP-FPM (`phpfpm`)
+  - Enhance HTTP connection options.
+- Ping (`ping`)
+  - Add TTL field.
+- Procstat (`procstat`)
+  - Add `cmdline` tag.
+  - Add pagefault data.
+- Prometheus (`prometheus`)
+  - Add namespace restriction.
+- SMART (`smart`)
+  - Support more drive types.
+- Socket Listener (`socket_listener`)
+  - Add option to set permissions for UNIX domain sockets.
+- StatsD (`statsd`)
+  - Add support for Datadog events.
+
+#### Output plugins
+
+- AMQP (`amqp`)
+  - Add support for gzip compression.
+- File (`file`)
+  - Add file rotation support.
+- Stackdriver (`stackdriver`)
+  - Set user agent.
+- VMware Wavefront (`wavefront`)
+  - Add option to use strict sanitization rules.
+
+#### Aggregator plugins
+
+- Histogram aggregator
+  - Add option to reset buckets on flush.
+
+#### Processor plugins
+
+- Converter (`converter`)
+  - Add hexadecimal string to integer conversion.
+- Enum (`enum`)
+  - Support tags.
+
+### Bug fixes
+
+#### Agent
+
+- Create Windows service only when specified or in service manager.
+- Don't start Telegraf when stale pid file found.
+- Fix inline table support in configuration file.
+- Fix multi-line basic strings support in configuration file.
+- Fix multiple SIGHUPs causing Telegraf to shut down.
+- Fix batch failing when a single metric is unserializable.
+- Log a warning on write if the metric buffer has overflowed.
+
+#### Plugins
+
+- AMQP (`amqp`) output
+  - Fix direct exchange routing key.
+- Neptune Apex (`neptune_apex`) input
+  - Skip invalid power times.
+- Docker (`docker`) input
+  - Fix Docker input not parsing image names correctly.
+- Fibaro (`fibaro`) input
+  - Set default timeout of `5s`.
+- InfluxDB v1.x (`influxdb`) output
+  - Fix connection leak on reload.
+- InfluxDB v2 (`influxdb_v2`) output
+  - Fix connection leak on reload.
+- Lustre 2 (`lustre2`) input
+  - Fix only one job per storage target reported.
+- Microsoft Azure Monitor (`azure_monitor`) output
+  - Fix scale set resource ID.
+- Microsoft SQL Server (`sqlserver`) input
+  - Fix connection closing on error.
+- Minecraft (`minecraft`) input
+  - Support Minecraft server 1.13 and newer.
+- NGINX Upstream Check (`nginx_upstream_check`) input
+  - Fix TOML option name.
+- PgBouncer (`pgbouncer`) input
+  - Fix unsupported pkt type error.
+- Procstat (`procstat`) input
+  - Verify a process passed by `pid_file` exists.
+- VMware vSphere (`vsphere`) input
+  - Fix datastore name mapping.
+
+## v1.10.4 [2019-05-14]
+
+### Bug fixes
+
+#### Agent
+
+- Create telegraf user in pre-install RPM scriptlet.
+- Fix parsing of Unix timestamps with more than nanosecond precision.
+- Fix race condition in the Wavefront parser.
+
+#### Plugins
+
+- HTTP output plugin (`http`)
+  - Fix HTTP output not being able to set the Host header.
+- IPMI Sensor input (`ipmi_sensor`)
+  - Add support for hex values.
+- InfluxDB v2 output (`influxdb_v2`)
+  - Don't discard metrics on forbidden error.
+- Interrupts input (`interrupts`)
+  - Restore field name case.
+- NTPQ input (`ntpq`)
+  - Skip lines with missing `refid`.
+- VMware vSphere input (`vsphere`)
+  - Fix interval estimation.
+
+## v1.10.3 [2019-04-16]
+
+### Bug fixes
+
+#### Agent
+
+- Set log directory attributes in RPM specification.
+
+#### Plugins
+
+- Prometheus Client (`prometheus_client`) output plugin
+  - Allow colons in metric names.
+
+## v1.10.2 [2019-04-02]
+
+### Breaking changes
+
+Grok input data format (parser): string fields no longer have leading and trailing quotation marks removed.
+If you are capturing quoted strings, the patterns might need to be updated.
+
+### Bug fixes
+
+#### Agent
+
+- Fix deadlock when Telegraf is aligning aggregators.
+- Add owned directories to RPM package specification.
+- Fix drop tracking of metrics removed with aggregator `drop_original`.
+- Fix aggregator window alignment.
+- Fix panic during shutdown of multiple aggregators.
+- Fix tags applied to wrong metric on parse error.
+
+#### Plugins
+
+- Ceph (`ceph`) input
+  - Fix missing cluster stats.
+- DiskIO (`diskio`) input
+  - Fix reading major and minor block device identifiers.
+- File (`file`) output
+  - Fix open file error handling.
+- Filecount (`filecount`) input
+  - Fix basedir check and parent dir extraction.
+- Grok (`grok`) parser
+  - Fix last character removed from string field.
+- InfluxDB v2 (`influxdb_v2`) output
+  - Fix plugin name in output logging.
+- Prometheus (`prometheus`) input
+  - Fix parsing of kube config `certificate-authority-data`.
+- Prometheus (`prometheus`) output
+  - Remove tags that would create invalid label names.
+- StatsD (`statsd`) input
+  - Listen before leaving start.
+
+## v1.10.1 [2019-03-19]
+
+#### Bug fixes
+
+- Show error when TLS configuration cannot be loaded.
+- Add base64-encoding/decoding for Google Cloud PubSub (`pubsub`) plugins.
+- Fix type compatibility in VMware vSphere (`vsphere`) input plugin with `use_int_samples` option.
+- Fix VMware vSphere (`vsphere`) input plugin showing failed tasks in vCenter.
+- Fix invalid measurement name and skip column in the CSV input data format parser.
+- Fix System (`system`) input plugin causing high CPU usage on Raspbian.
+
+## v1.10 [2019-03-05]
+
+#### New input plugins
+
+- [Google Cloud PubSub (`cloud_pubsub`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye
+- [Kubernetes Inventory (`kube_inventory`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/kube_inventory/README.md) - Contributed by @influxdata
+- [Neptune Apex (`neptune_apex`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud
+- [NGINX Upstream Check (`nginx_upstream_check`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin
+- [Multifile (`multifile`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/multifile/README.md) - Contributed by @martin2250
+
+#### New output plugins
+
+- [Google Cloud PubSub (`cloud_pubsub`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye
+
+#### New output data formats (serializers)
+
+- [ServiceNow Metrics](/telegraf/v1.12/data_formats/output/nowmetric) - Contributed by @JefMuller
+- [Carbon2](/telegraf/v1.12/data_formats/output/carbon2) - Contributed by @frankreno
+
+#### Features
+
+- **General**
+  - Allow forced gathering of Elasticsearch cluster stats.
+  - Add Linux `mipsle` packages.
+- **Input plugins**
+  - Ceph (`ceph`)
+    - Add read and write op-per-second fields.
+  - CouchDB (`couchdb`)
+    - Add support for basic auth.
+  - DNS Query (`dns_query`)
+    - Add `rcode` tag and field.
+  - DiskIO (`diskio`)
+    - Include `DEVLINKS` in available `udev` properties.
+  - HTTP (`http`)
+    - Add support for sending a request body to `http` input.
+  - InfluxDB Listener (`influxdb_listener`)
+    - Add internal metric for lines that are too long.
+  - Interrupts (`interrupts`)
+    - Add option to store `cpu` as a tag.
+  - Kafka Consumer (`kafka_consumer`)
+    - Add ability to tag metrics with topic.
+  - Kubernetes (`kubernetes`)
+    - Support passing bearer token directly.
+  - Microsoft SQL Server (`sqlserver`)
+    - Add log send and redo queue fields.
+  - MongoDB (`mongodb`)
+    - Add `flush_total_time_ns` and additional WiredTiger fields.
+  - Procstat (`procstat`)
+    - Add `running` field to `procstat_lookup`.
+  - Prometheus (`prometheus`)
+    - Support passing bearer token directly.
+    - Add option to report input timestamp.
+  - VMware vSphere (`vsphere`)
+    - Improve scalability.
+    - Add resource path-based filtering.
+  - Varnish (`varnish`)
+    - Add configurable timeout.
+- **Output plugins**
+  - MQTT (`mqtt`)
+    - Add option to set retain flag on messages.
+  - Stackdriver (`stackdriver`)
+    - Add resource type and resource label support.
+  - VMware Wavefront (`wavefront`)
+    - Add support for the Wavefront Direct Ingestion API.
+- **Aggregator plugins**
+  - Value Counter (`valuecounter`)
+    - Allow counting float values.
+- **Data formats**
+  - **Input data formats**
+    - CSV
+      - Support `unix_us` and `unix_ns` timestamp format.
+      - Add support for `unix` and `unix_ms` timestamps.
+    - Grok (`grok`)
+      - Allow parser to produce metrics with no fields.
+    - JSON
+      - Add micro and nanosecond unix timestamp support.
+  - **Output data formats**
+    - ServiceNow Metrics
+
+#### Bug fixes
+
+- **General**
+  - Use `systemd` in Amazon Linux 2 rpm.
+  - Fix `initscript` removing the `pidfile` of a restarted Telegraf process.
+- **Input plugins**
+  - Consul (`consul`)
+    - Use datacenter option spelling.
+  - InfluxDB Listener (`influxdb_listener`)
+    - Remove auth from `/ping` route.
+  - Microsoft SQL Server (`sqlserver`)
+    - Set deadlock priority.
+  - Nstat (`nstat`)
+    - Remove error log when `snmp6` directory does not exist.
+  - Ping (`ping`)
+    - Fix host not added when using custom arguments.
+  - X.509 Certificate (`x509_cert`)
+    - Fix issue where input stopped checking certificates after the first error.
+- **Output plugins**
+  - Prometheus (`prometheus`)
+    - Sort metrics by timestamp.
+  - Stackdriver (`stackdriver`)
+    - Skip string fields when writing.
+    - Send metrics in ascending time order.
+
+## v1.9.5 [2019-02-26]
+
+### Bug fixes
+
+* General
+  * Use `systemd` in Amazon Linux 2 rpm.
+* Ceph Storage (`ceph`) input plugin
+  * Add backwards compatibility fields in usage and pool statistics.
+* InfluxDB (`influxdb`) output plugin
+  * Fix UDP line splitting.
+* Microsoft SQL Server (`sqlserver`) input plugin
+  * Set deadlock priority to low.
+  * Disable results by row in AzureDB query.
+* Nstat (`nstat`) input plugin
+  * Remove error log when `snmp6` directory does not exist.
+* Ping (`ping`) input plugin
+  * Fix host not added when using custom arguments.
+* Stackdriver (`stackdriver`) output plugin
+  * Skip string fields when writing to stackdriver output.
+  * Send metrics in ascending time order.
+
+## v1.9.4 [2019-02-05]
+
+### Bug fixes
+
+* General
+  * Fix `skip_rows` and `skip_columns` options in CSV parser.
+  * Build official packages with Go 1.11.5.
+* Jenkins (`jenkins`) input plugin
+  * Always send basic auth in jenkins input.
+* Syslog (`syslog`) input plugin
+  * Fix definition of multiple syslog plugins.
+
+## v1.9.3 [2019-01-22]
+
+#### Bug fixes
+
+* General
+  * Fix latest metrics not sent first when output fails.
+  * Fix `internal_write` `buffer_size` not being reset on timed writes.
+* AMQP Consumer (`amqp_consumer`) input plugin
+  * Fix issue where the `amqp_consumer` input stopped consuming when it received
+    unparseable messages.
+* Couchbase (`couchbase`) input plugin
+  * Remove `userinfo` from cluster tag in `couchbase` input.
+* Microsoft SQL Server (`sqlserver`) input plugin
+  * Fix arithmetic overflow in `sqlserver` input.
+* Prometheus (`prometheus`) input plugin
+  * Fix `prometheus` input not detecting added and removed pods.
+
+## v1.9.2 [2019-01-08]
+
+### Bug fixes
+
+- Increase `varnishstat` timeout.
+- Remove storage calculation for non-Azure-managed instances and add server version.
+- Fix error sending empty tag value in `azure_monitor` output.
+- Fix panic with Prometheus input plugin on shutdown.
+- Support non-transparent framing of syslog messages.
+- Apply global- and plugin-level metric modifications before filtering.
+- Fix `num_remapped_pgs` field in `ceph` plugin.
+- Add `PDH_NO_DATA` to known counter error codes in `win_perf_counters`.
+- Fix issue where `amqp_consumer` stopped consuming on an empty message.
+- Fix multiple replace tables not working in strings processor.
+- Allow non-local UDP connections in `net_response`.
+- Fix TOML option names in parser processor.
+- Fix panic in Docker input with bad endpoint.
+- Fix original metric modified by aggregator filters.
+
+## v1.9.1 [2018-12-11]
+
+### Bug fixes
+
+- Fix boolean handling in splunkmetric serializer.
+- Set default config values in Jenkins input.
+- Fix server connection and document stats in MongoDB input.
+- Add X-Requested-By header to Graylog input.
+- Fix metric memory not freed from the metric buffer on write.
+- Add support for client TLS certificates in PostgreSQL inputs.
+- Prevent panic when marking the offset in `kafka_consumer`.
+- Add early metrics to aggregator and honor `drop_original` setting.
+- Use `-W` flag on BSD variants in ping input.
+- Allow delta metrics in Wavefront parser.
+
+## v1.9.0 [2018-11-20]
+
+#### Release Notes
+
+- The HTTP Listener (`http_listener`) input plugin has been renamed to
+  InfluxDB Listener (`influxdb_listener`) input plugin and
+  use of the original name is deprecated. The new name better describes the
+  intended use of the plugin as an InfluxDB relay. For general-purpose
+  transfer of metrics in any format using HTTP, InfluxData recommends using
+  HTTP Listener v2 (`http_listener_v2`) input plugin.
+
+- Input plugins are no longer limited from adding metrics when the output is
+  writing and new metrics will move into the metric buffer as needed. This
+  will provide more robust degradation and recovery when writing to a slow
+  output at high throughput.
+
+  To avoid overconsumption when reading from queue consumers, the following
+  input plugins use the new option `max_undelivered_messages` to limit the number
+  of outstanding unwritten metrics:
+
+  * Apache Kafka Consumer (`kafka_consumer`)
+  * AMQP Consumer (`amqp_consumer`)
+  * MQTT Consumer (`mqtt_consumer`)
+  * NATS Consumer (`nats_consumer`)
+  * NSQ Consumer (`nsq_consumer`)
+
+#### New input plugins
+
+- [HTTP Listener v2 (`http_listener_v2`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5
+- [IPVS (`ipvs`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ipvs/README.md) - Contributed by @amoghe
+- [Jenkins (`jenkins`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/jenkins/README.md) - Contributed by @influxdata & @lpic10
+- [NGINX Plus API (`nginx_plus_api`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr
+- [NGINX VTS (`nginx_vts`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_vts/README.md) - Contributed by @monder
+- [Wireless (`wireless`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment
+
+#### New output plugins
+
+- [Stackdriver (`stackdriver`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment
+
+#### Features
+
+- General
+  - Add ability to define a custom service name when installing as a Windows service.
+  - Add new configuration options for explicit CSV column type conversion.
+  - Add Telegraf version to `User-Agent` header.
+  - Add ability to specify bytes options as strings with units.
+  - Add per-output `flush_interval`, `metric_buffer_limit`, and `metric_batch_size`.
+- Amazon Kinesis (`kinesis`) output plugin
+  - Use `DescribeStreamSummary` in place of `ListStreams`.
+- DNS Query (`dns_query`) input plugin
+  - Query servers in parallel.
+- Datadog (`datadog`) output plugin
+  - Add an option to specify a custom URL.
+  - Use non-allocating field and tag accessors.
+- Filecount (`filecount`) input plugin
+  - Add per-directory file count.
+- HTTP (`http`) output plugin
+  - Add entity-body compression.
+- Memcached (`memcached`) input plugin
+  - Collect additional statistics.
+- NSQ (`nsq`) input plugin
+  - Add TLS configuration support.
+- Ping (`ping`) input plugin
+  - Add support for IPv6.
+- Procstat (`procstat`) input plugin
+  - Add Windows service name lookup.
+- Prometheus (`prometheus`) input plugin
+  - Add scraping for Prometheus annotations in Kubernetes.
+  - Allow connecting to Prometheus using UNIX socket.
+- Strings (`strings`) processor plugin
+  - Add `replace` function.
+- VMware vSphere (`vsphere`) input plugin
+  - Add LUN to data source translation.
+
+#### Bug fixes
+
+- Remove `time_key` from the field values in JSON parser.
+- Fix input time rounding when using a custom interval.
+- Fix potential deadlock or leaked resources on restart or reload.
+- Fix outputs blocking inputs when batch size is reached.
+- Fix potential missing datastore metrics in VMware vSphere (`vsphere`) input plugin.
+
+## v1.8.3 [2018-10-30]
+
+### Bug fixes
+
+- Add DN attributes as tags in X.509 Certificate (`x509_cert`) input plugin to avoid series overwrite.
+- Prevent connection leak by closing unused connections in AMQP (`amqp`) output plugin.
+- Use default partition key when tag does not exist in Amazon Kinesis (`kinesis`) output plugin.
+- Log the correct error in JTI OpenConfig Telemetry (`jti_openconfig_telemetry`) input plugin.
+- Handle panic when IPMI Sensor (`ipmi_sensor`) input plugin gets bad input.
+- Don't add unserializable fields to Jolokia2 (`jolokia2`) input plugin.
+- Fix version check in PostgreSQL Extensible (`postgresql_extensible`) plugin.
+
+## v1.8.2 [2018-10-17]
+
+### Bug fixes
+
+* Aerospike (`aerospike`) input plugin
+  * Support uint fields.
+* Docker (`docker`) input plugin
+  * Use container name from list if no name in container stats.
+* Filecount (`filecount`) input plugin
+  * Prevent panic on error in file stat.
+* InfluxDB v2 (`influxdb_v2`) output plugin
+  * Update write path to match updated v2 API.
+* Logparser (`logparser`) input plugin
+  * Fix panic.
+* MongoDB (`mongodb`) input plugin
+  * Lower authorization errors to debug level.
+* MQTT Consumer (`mqtt_consumer`) input plugin
+  * Fix connect and reconnect.
+* Ping (`ping`) input plugin
+  * Return correct response code.
+* VMware vSphere (`vsphere`) input plugin
+  * Fix missing timeouts.
+* X.509 Certificate (`x509_cert`) input plugin
+  * Fix segfault.
+
+## v1.8.1 [2018-10-03]
+
+### Bug fixes
+
+- Fix `hardware_type` being truncated in Microsoft SQL Server (`sqlserver`) input plugin.
+- Improve performance in Basicstats (`basicstats`) aggregator plugin.
+- Add `hostname` to TLS config for SNI support in X.509 Certificate (`x509_cert`) input plugin.
+- Don't add tags with empty values to OpenTSDB (`opentsdb`) output plugin.
+- Fix panic during network error in VMware vSphere (`vsphere`) input plugin.
+- Unify error response in HTTP Listener (`http_listener`) input plugin with InfluxDB (`influxdb`) output plugin. +- Add `UUID` to VMs in VMware vSphere (`vsphere`) input plugin. +- Skip tags with empty values in Amazon Cloudwatch (`cloudwatch`) output plugin. +- Fix missing non-realtime samples in VMware vSphere (`vsphere`) input plugin. +- Fix case of `timezone`/`grok_timezone` options in grok parser and logparser input plugin. + +## v1.8 [2018-09-21] + +### New input plugins + +- [ActiveMQ (`activemq`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/activemq/README.md) - Contributed by @mlabouardy +- [Beanstalkd (`beanstalkd`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/beanstalkd/README.md) - Contributed by @44px +- [File (`file`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/file/README.md) - Contributed by @maxunt +- [Filecount (`filecount`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/filecount/README.md) - Contributed by @sometimesfood +- [Icinga2 (`icinga2`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy +- [Kibana (`kibana`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/kibana/README.md) - Contributed by @lpic10 +- [PgBouncer (`pgbouncer`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul +- [Temp (`temp`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/temp/README.md) - Contributed by @pytimer +- [Tengine (`tengine`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/tengine/README.md) - Contributed by @ertaoxu +- [VMware vSphere (`vsphere`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/vsphere/README.md) - Contributed by @prydin +- [X.509 Certificate (`x509_cert`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/x509_cert/README.md) - Contributed by @jtyr + +### New processor plugins + +- [Enum (`enum`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter +- [Parser (`parser`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/parser/README.md) - Contributed by @Ayrdrie & @maxunt +- [Rename (`rename`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/rename/README.md) - Contributed by @goldibex +- [Strings (`strings`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/strings/README.md) - Contributed by @bsmaldon + +### New aggregator plugins + +- [ValueCounter (`valuecounter`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212 + +### New output plugins + +- [Azure Monitor (`azure_monitor`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata +- [InfluxDB v2 (`influxdb_v2`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/outputs/influxdb_v2/README.md) - Contributed by @influxdata + +### New input data formats (parsers) + +- [csv](https://archive.docs.influxdata.com/telegraf/v1.8/data_formats/input/csv) - Contributed by @maxunt +- [grok](https://archive.docs.influxdata.com/telegraf/v1.8/data_formats/input/grok/) - Contributed by @maxunt +- 
[logfmt](https://archive.docs.influxdata.com/telegraf/v1.8/data_formats/input/logfmt/) - Contributed by @Ayrdrie & @maxunt
+- [wavefront](https://archive.docs.influxdata.com/telegraf/v1.8/data_formats/input/wavefront/) - Contributed by @puckpuck
+
+### New output data formats (serializers)
+
+- [splunkmetric](https://archive.docs.influxdata.com/telegraf/v1.8/data_formats/output/splunkmetric/) - Contributed by @ronnocol
+
+### Features
+
+- Add SSL/TLS support to Redis (`redis`) input plugin.
+- Add tengine input plugin.
+- Add power draw field to the NVIDIA SMI (`nvidia_smi`) input plugin.
+- Add support for Solr 7 to the Solr (`solr`) input plugin.
+- Add owner tag on partitions in Burrow (`burrow`) input plugin.
+- Add container status tag to Docker (`docker`) input plugin.
+- Add ValueCounter (`valuecounter`) aggregator plugin.
+- Add new measurement with results of `pgrep` lookup to Procstat (`procstat`) input plugin.
+- Add support for comma in logparser timestamp format.
+- Add path tag to Tail (`tail`) input plugin.
+- Add log message when tail is added or removed from a file.
+- Add option to use counter time in win perf counters.
+- Add energy and power field and device id tag to Fibaro (`fibaro`) input plugin.
+- Add HTTP path configuration for OpenTSDB output.
+- Gather IPMI metrics concurrently.
+- Add mongo document and connection metrics.
+- Add enum processor plugin.
+- Add user tag to procstat input.
+- Add support for multivalue metrics to collectd parser.
+- Add support for setting kafka client id.
+- Add file input plugin and grok parser.
+- Improve cloudwatch output performance.
+- Add x509_cert input plugin.
+- Add IPSIpAddress syntax to ipaddr conversion in snmp plugin.
+- Add Filecount (`filecount`) input plugin.
+- Add support for configuring an AWS `endpoint_url`.
+- Send all messages before waiting for results in Kafka output plugin.
+- Add support for lz4 compression to Kafka output plugin.
+- Split multiple sensor keys in ipmi input.
+- Support StatisticValues in cloudwatch output plugin.
+- Add ip restriction for the prometheus_client output.
+- Add PgBouncer (`pgbouncer`) input plugin.
+- Add ActiveMQ input plugin.
+- Add wavefront parser plugin.
+- Add rename processor plugin.
+- Add `max_bytes` message configuration to kafka input.
+- Add gopsutil meminfo fields to Mem (`mem`) input plugin.
+- Document how to parse Telegraf logs.
+- Use dep v0.5.0.
+- Add ability to set measurement from matched text in grok parser.
+- Drop message batches in Kafka (`kafka`) output plugin if too large.
+- Add support for static and random routing keys in Kafka (`kafka`) output plugin.
+- Add logfmt parser plugin.
+- Add parser processor plugin.
+- Add Icinga2 input plugin.
+- Add name, time, path and string field options to JSON parser.
+- Add forwarded records to sqlserver input.
+- Add Kibana input plugin.
+- Add csv parser plugin.
+- Add read_buffer_size option to statsd input.
+- Add azure_monitor output plugin.
+- Add queue_durability parameter to amqp_consumer input.
+- Add strings processor.
+- Add OAuth 2.0 support to HTTP output plugin.
+- Add Unix epoch timestamp support for JSON parser.
+- Add options for basic auth to haproxy input.
+- Add temp input plugin.
+- Add Beanstalkd input plugin.
+- Add means to specify server password for redis input.
+- Add Splunk Metrics serializer.
+- Add input plugin for VMware vSphere.
+- Align metrics window to interval in cloudwatch input.
+- Improve Azure Managed Instance support and more in sqlserver input.
+- Allow alternate binaries for iptables input plugin.
+- Add influxdb_v2 output plugin.
+
+### Bug fixes
+
+- Fix divide by zero in logparser input.
+- Fix instance and object name in performance counters with backslashes.
+- Reset/flush saved contents from bad metric.
+- Document all supported CLI arguments.
+- Log access denied opening a service at debug level in win_services.
+- Add support for Kafka 2.0.
+- Fix nagios parser not supporting ranges in performance data.
+- Fix nagios parser not stripping quotes from performance data.
+- Fix null value crash in postgresql_extensible input.
+- Remove the startup authentication check from the cloudwatch output.
+- Support tailing files created after startup in tail input.
+- Fix CSV format configuration loading.
+
+## v1.7.4 [2018-08-29]
+
+### Bug fixes
+
+* Continue sending write batch in UDP if a metric is unserializable in InfluxDB (`influxdb`) output plugin.
+* Fix PowerDNS (`powerdns`) input plugin tests.
+* Fix `burrow_group` offset calculation for Burrow (`burrow`) input plugin.
+* Add `result_code` value for errors running ping command.
+* Remove timeout deadline for UDP in Syslog (`syslog`) input plugin.
+* Ensure channel is closed if an error occurs in CGroup (`cgroup`) input plugin.
+* Fix sending of basic authentication credentials in HTTP (`http`) output plugin.
+* Use the correct `GOARM` value in the Linux armel package.
+
+## v1.7.3 [2018-08-07]
+
+### Bug fixes
+
+* Reduce required Docker API version.
+* Keep leading whitespace for messages in syslog input.
+* Skip bad entries on interrupt input.
+* Preserve metric type when using filters in output plugins.
+* Fix error message if URL is unparseable in InfluxDB output.
+* Use explicit `zpool` properties to fix parse error on FreeBSD 11.2.
+* Lock buffer when adding metrics.
+
+## v1.7.2 [2018-07-18]
+
+### Bug fixes
+
+* Use localhost as default server tag in Zookeeper (`zookeeper`) input plugin.
+* Don't set values when pattern doesn't match in Regex (`regex`) processor plugin.
+* Fix output format of Printer (`printer`) processor plugin.
+* Fix issue where a metric could have a duplicate field.
+* Return error if NewRequest fails in HTTP (`http`) output plugin.
+* Reset read deadline for Syslog (`syslog`) input plugin.
+* Exclude cached memory on Docker (`docker`) input plugin.
+
+## v1.7.1 [2018-07-03]
+
+### Bug fixes
+
+* Treat `SIGTERM` as a clean shutdown signal.
+* Fix selection of tags under nested objects in the JSON parser.
+* Fix Postfix (`postfix`) input plugin handling of multilevel queues.
+* Fix Syslog (`syslog`) input plugin timestamp parsing with single-digit day of month.
+* Handle variations in `user_statistics` collection in the MySQL (`mysql`) input plugin.
+* Fix Minmax (`minmax`) and Basicstats (`basicstats`) aggregator plugins to use `uint64`.
+* Document Swap (`swap`) input plugin.
+* Fix incorrect precision being applied to metric in HTTP Listener (`http_listener`) input plugin.
+
+## v1.7 [2018-06-12]
+
+### Release notes
+
+- The Cassandra (`cassandra`) input plugin has been deprecated in favor of the Jolokia2 (`jolokia2`)
+  input plugin, which is much more configurable and performant. There is
+  an [example configuration](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jolokia2/examples) to help you
+  get started.
+
+- For plugins supporting TLS, you can now specify the certificate and keys
+  using `tls_ca`, `tls_cert`, and `tls_key`. These options behave the same as
+  the now-deprecated `ssl` forms; a brief sketch follows.
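+
+  A minimal example of the new form, assuming an input that supports TLS
+  (the certificate paths are placeholders):
+
+  ```
+  [[inputs.haproxy]]
+    servers = ["http://localhost:1936/haproxy?stats"]
+    ## Placeholder paths -- substitute your own certificate files.
+    tls_ca = "/etc/telegraf/ca.pem"
+    tls_cert = "/etc/telegraf/cert.pem"
+    tls_key = "/etc/telegraf/key.pem"
+  ```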
+
+### New input plugins
+
+- [Aurora (`aurora`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/aurora/README.md) - Contributed by @influxdata
+- [Burrow (`burrow`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/burrow/README.md) - Contributed by @arkady-emelyanov
+- [Fibaro (`fibaro`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/fibaro/README.md) - Contributed by @dynek
+- [JTI OpenConfig Telemetry (`jti_openconfig_telemetry`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jti_openconfig_telemetry/README.md) - Contributed by @ajhai
+- [Mcrouter (`mcrouter`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
+- [NVIDIA SMI (`nvidia_smi`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
+- [Syslog (`syslog`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/syslog/README.md) - Contributed by @influxdata
+
+### New processor plugins
+
+- [Converter (`converter`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/converter/README.md) - Contributed by @influxdata
+- [Regex (`regex`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/regex/README.md) - Contributed by @44px
+- [TopK (`topk`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/topk/README.md) - Contributed by @mirath
+
+### New output plugins
+
+- [HTTP (`http`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/http/README.md) - Contributed by @Dark0096
+- [Application Insights (`application_insights`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/application_insights/README.md) - Contributed by @karolz-ms
+
+### Features
+
+- Add `repl_oplog_window_sec` metric to MongoDB (`mongodb`) input plugin.
+- Add per-host shard metrics in MongoDB (`mongodb`) input plugin.
+- Skip files with leading `..` in config directory.
+- Add TLS support to `socket_writer` and `socket_listener` plugins.
+- Add `snmp` input option to strip non-fixed length index suffixes.
+- Add server version tag to the Docker (`docker`) input plugin.
+- Add support for LeoFS 1.4 to `leofs` input.
+- Add parameter to force the gather interval for Sysstat (`sysstat`).
+- Support BusyBox ping in the Ping (`ping`) input plugin.
+- Add Mcrouter (`mcrouter`) input plugin.
+- Add TopK (`topk`) processor plugin.
+- Add cursor metrics to MongoDB (`mongodb`) input plugin.
+- Add tag/integer pair for result to Network Response (`net_response`) input plugin.
+- Add Application Insights (`application_insights`) output plugin.
+- Add several important Elasticsearch cluster health metrics.
+- Add batch mode to `mqtt` output.
+- Add Aurora (`aurora`) input plugin.
+- Add Regex (`regex`) processor plugin.
+- Add support for Graphite 1.1 tags.
+- Add timeout option to Sensors (`sensors`) input plugin.
+- Add Burrow (`burrow`) input plugin.
+- Add option to Unbound (`unbound`) input plugin to use threads as tags.
+- Add support for TLS and username/password auth to Aerospike (`aerospike`) input plugin.
+- Add special syslog timestamp parser to grok parser that uses current year.
+- Add Syslog (`syslog`) input plugin.
+- Print the enabled aggregator and processor plugins on startup.
+- Add static `routing_key` option to `amqp` output.
+- Add passive mode exchange declaration option to AMQP Consumer (`amqp_consumer`) input plugin.
+- Add counter fields to PF (`pf`) input plugin.
+
+### Bug fixes
+
+- Write to working file outputs if any files are not writeable.
+- Add all win_perf_counters fields for a series in a single metric.
+- Report results of `dns_query` instead of `0ms` on timeout.
+- Add consul service tags to metric.
+- Fix wildcards and multi-instance processes in win_perf_counters.
+- Fix crash on 32-bit Windows in `win_perf_counters`.
+- Fix `win_perf_counters` not collecting at every interval.
+- Use same flags for all BSD family ping variants.
+
+## v1.6.4 [2018-06-05]
+
+### Bug fixes
+
+* Fix SNMP overriding of auto-configured table fields.
+* Fix uint support in CloudWatch output.
+* Fix documentation of `instance_name` option in Varnish input.
+* Revert to previous Aerospike library version due to memory leak.
+
+## v1.6.3 [2018-05-21]
+
+### Bug fixes
+
+* Fix intermittent panic in Aerospike input plugin.
+* Fix connection leak in the Jolokia agent (`jolokia2_agent`) input plugin.
+* Fix Jolokia agent (`jolokia2_agent`) input plugin timeout parsing.
+* Fix error parsing Dropwizard metrics.
+* Fix Librato (`librato`) output plugin support for unsigned integer (`uint`) and Boolean (`bool`).
+* Fix WaitGroup deadlock in Apache input plugin when URL is incorrect.
+
+## v1.6.2 [2018-05-08]
+
+### Bug fixes
+
+* Use same timestamp for fields in system input.
+* Fix handling of uint64 in Datadog (`datadog`) output.
+* Ignore UTF-8 BOM in JSON parser.
+* Fix case for slave metrics in MySQL (`mysql`) input.
+* Fix uint support in CrateDB (`cratedb`) output.
+
+## v1.6.1 [2018-04-23]
+
+### Bug fixes
+
+* Report mem input fields as gauges instead of counters.
+* Fix Graphite outputting unsigned integers in wrong format.
+* Report available fields if `utmp` is unreadable.
+* Fix potential `no fields` error writing to outputs.
+* Fix uptime reporting in system input when run inside Docker.
+* Fix mem input `cannot allocate memory` error on FreeBSD-based systems.
+* Fix duplicate tags when overriding an existing tag.
+* Add server argument as first argument in the Unbound (`unbound`) input plugin.
+* Fix handling of floats with multiple leading zeroes.
+* Return errors in SSL/TLS configuration of MongoDB (`mongodb`) input plugin.
+
+## v1.6 [2018-04-16]
+
+### Release notes
+
+- The MySQL (`mysql`) input plugin has been updated to fix a number of type conversion
+  issues. This may cause a `field type error` when inserting into InfluxDB due
+  to the change of types.
+
+  To address this, we have introduced a new `metric_version` option to control
+  enabling the new format.
+  For in-depth recommendations on upgrading, see [Metric version](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql#metric-version) in the MySQL input plugin documentation.
+
+  You are encouraged to migrate to the new model when possible as the old version
+  is deprecated and will be removed in a future version.
+
+- The PostgreSQL (`postgresql`) input plugin now defaults to using a persistent connection to the database.
+  In environments where TCP connections are terminated, the `max_lifetime`
+  setting should be set to less than the collection `interval` to prevent errors.
+
+- The SQL Server (`sqlserver`) input plugin has a new query and data model that can be enabled
+  by setting `query_version = 2`.
+  Migrate to the new model, if possible, since the old version is deprecated and will be removed in a future version.
+
+- The OpenLDAP (`openldap`) input plugin has a new option, `reverse_metric_names = true`, that reverses metric
+  names to improve grouping.
+  Enable this option, when possible, as the old ordering is deprecated.
+
+- The new HTTP (`http`) input plugin, when configured with `data_format = "json"`, can perform the
+  same task as the now-deprecated HTTP JSON (`httpjson`) input plugin.
+
+### New input plugins
+
+- [HTTP (`http`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http/README.md) - Thanks to @grange74
+- [Ipset (`ipset`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ipset/README.md) - Thanks to @sajoupa
+- [NATS Server Monitoring (`nats`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/nats/README.md) - Thanks to @mjs and @levex
+
+### New processor plugins
+
+- [Override (`override`) processor plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/override/README.md) - Thanks to @KarstenSchnitter
+
+### New parsers
+
+- [Dropwizard input data format](https://github.com/influxdata/telegraf/blob/release-1.8/docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum
+
+### Features
+
+* Add health status mapping from `string` to `int` in Elasticsearch (`elasticsearch`) input plugin.
+* Add control over which stats to gather in BasicStats (`basicstats`) aggregator plugin.
+* Add `messages_delivered_get` to RabbitMQ (`rabbitmq`) input plugin.
+* Add `wired` field to mem input plugin.
+* Add support for gathering exchange metrics to the RabbitMQ (`rabbitmq`) input plugin.
+* Add support for additional metrics on Linux in Zfs (`zfs`) input plugin.
+* Add `available_entropy` field to Kernel (`kernel`) input plugin.
+* Add user privilege level setting to IPMI sensors.
+* Use persistent connection to PostgreSQL database.
+* Add support for dropwizard input data format.
+* Add container health metrics to Docker (`docker`) input plugin.
+* Add support for using globs in devices list of DiskIO (`diskio`) input plugin.
+* Allow running as console application on Windows.
+* Add listener counts and node running status to RabbitMQ (`rabbitmq`) input plugin.
+* Add NATS Server Monitoring (`nats`) input plugin.
+* Add ability to select which queues will be gathered in RabbitMQ (`rabbitmq`) input plugin.
+* Add support for setting BSD source address to the ping (`ping`) input plugin.
+* Add Ipset (`ipset`) input plugin.
+* Add TLS and HTTP basic auth to Prometheus Client (`prometheus_client`) output plugin.
+* Add new sqlserver output data model.
+* Add native Go method for finding `pid` to the Procstat (`procstat`) input plugin.
+* Add additional metrics and reverse metric names option to OpenLDAP (`openldap`) input plugin.
+* Add TLS support to the Mesos (`mesos`) input plugin.
+* Add HTTP (`http`) input plugin.
+* Add keep-alive support to the TCP mode of StatsD (`statsd`) input plugin.
+* Support deadline in Ping (`ping`) input plugin.
+* Add option to disable labels for string fields in the Prometheus Client (`prometheus_client`) output plugin.
+* Add shard server stats to the MongoDB (`mongodb`) input plugin.
+* Add server option to Unbound (`unbound`) input plugin.
+* Convert boolean metric values to float in Datadog (`datadog`) output plugin.
+* Add Solr 3 compatibility.
+* Add sum stat to BasicStats (`basicstats`) aggregator plugin.
+* Add ability to override proxy from environment in HTTP Response (`http_response`) input plugin.
+* Add host to ping timeout log message.
+* Add override processor plugin.
+* Add `status_code` and result tags and `result_type` field to HTTP Response (`http_response`) input plugin.
+* Add config flag to skip collection of network protocol metrics.
+* Add TLS support to Kapacitor (`kapacitor`) input plugin.
+* Add HTTP basic auth support to the HTTP Listener (`http_listener`) input plugin.
+* Tags in output InfluxDB Line Protocol are now sorted.
+* InfluxDB Line Protocol parser now accepts DOS line endings.
+* An option has been added to skip database creation in the InfluxDB (`influxdb`) output plugin.
+* Add support for connecting to InfluxDB over a UNIX domain socket.
+* Add optional unsigned integer support to the influx data format.
+* Add TLS support to Zookeeper (`zookeeper`) input plugin.
+* Add filters for container state to Docker (`docker`) input plugin.
+
+### Bug fixes
+
+* Fix various MySQL data type conversions.
+* Fix metric buffer limit in internal plugin after reload.
+* Fix panic in HTTP Response (`http_response`) input plugin on invalid regex.
+* Fix socket_listener setting `ReadBufferSize` on TCP sockets.
+* Add tag for target URL to `phpfpm` input plugin.
+* Fix `cannot unmarshal object` error in Mesosphere DC/OS (`dcos`) input plugin.
+* Fix InfluxDB output not able to reconnect when server address changes.
+* Fix parsing of DOS line endings in the SMART (`smart`) input plugin.
+* Fix precision truncation when no timestamp included.
+* Fix SNMPv3 connection with Cisco ASA 5515 in SNMP (`snmp`) input plugin.
+
+## v1.5.3 [2018-03-14]
+
+### Bug fixes
+
+* Set path to `/` if `HOST_MOUNT_PREFIX` matches full path.
+* Remove `userinfo` from `url` tag in Prometheus input plugin.
+* Fix Ping input plugin not reporting zero durations.
+* Disable `keepalive` in MQTT output plugin to prevent deadlock.
+* Fix collation difference in SQL Server (`sqlserver`) input plugin.
+* Fix uptime metric in Passenger (`passenger`) input plugin.
+* Add output of stderr in case of error to exec log message.
+
+## v1.5.2 [2018-01-30]
+
+### Bug fixes
+
+- Ignore empty lines in Graphite plaintext.
+- Fix `index out of bounds` error in Solr input plugin.
+- Reconnect before sending Graphite metrics if disconnected.
+- Align aggregator period with internal ticker to avoid skipping metrics.
+- Fix a potential deadlock when using aggregators.
+- Limit wait time for writes in MQTT (`mqtt`) output plugin.
+- Revert change in Graphite (`graphite`) output plugin where dot (`.`) in field key was replaced by underscore (`_`).
+- Add `timeout` to Wavefront output write.
+- Exclude `master_replid` fields from Redis input.
+
+## v1.5.1 [2018-01-10]
+
+### Bug fixes
+
+- Fix name error in jolokia2_agent sample config.
+- Fix login expiration time in DC/OS input.
+- Set Content-Type charset parameter in InfluxDB (`influxdb`) output plugin and allow it to be overridden.
+- Document permissions setup for Postfix (`postfix`) input plugin.
+- Fix `deliver_get` field in RabbitMQ (`rabbitmq`) input plugin.
+- Escape environment variables during config TOML parsing.
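+
+As background for the last fix above: Telegraf substitutes environment
+variables referenced in the configuration file when the config is loaded, so
+values such as credentials can be kept out of the file itself. A minimal
+sketch, using a placeholder variable name:
+
+```
+[[outputs.influxdb]]
+  urls = ["http://localhost:8086"]
+  ## Placeholder variable, resolved from the environment at config load time.
+  password = "$INFLUXDB_PASSWORD"
+```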
+
+## v1.5 [2017-12-14]
+
+### New plugins
+
+#### Input plugins
+- [Bond (bond)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/bond/README.md) - Thanks to @ildarsv
+- [DC/OS (dcos)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/dcos/README.md) - Thanks to @influxdata
+- [Jolokia2 (jolokia2)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
+- [NGINX Plus (nginx_plus)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
+- [OpenSMTPD (opensmtpd)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
+- [Particle.io Webhooks (particle)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
+- [PF (pf)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/pf/README.md) - Thanks to @nferch
+- [Postfix (postfix)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/postfix/README.md) - Thanks to @phemmer
+- [SMART (smart)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
+- [Solr (solr)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/solr/README.md) - Thanks to @ljagiello
+- [Teamspeak (teamspeak)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
+- [Unbound (unbound)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/unbound/README.md) - Thanks to @aromeyer
+
+#### Aggregator plugins
+- [BasicStats (basicstats)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
+
+#### Output plugins
+- [CrateDB (cratedb)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/cratedb) - Thanks to @felixge
+- [Wavefront (wavefront)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
+
+### Release notes
+
+- In the Kinesis (`kinesis`) output plugin, use of the `partition_key` and
+  `use_random_partitionkey` options has been deprecated in favor of the
+  `partition` subtable. This allows for more flexible methods to set the
+  partition key, such as by metric name or by tag.
+
+- With the release of the new, improved Jolokia2 (`jolokia2`) input plugin, the legacy `jolokia`
+  plugin is deprecated and will be removed in a future release. Users of this
+  plugin are encouraged to update to the new `jolokia2` plugin.
+
+### Features
+
+- Add support for sharding based on metric name.
+- Add Kafka output plugin `topic_suffix` option.
+- Include mount mode option in disk metrics.
+- TLS and MTLS enhancements to HTTP Listener input plugin.
+- Add polling method to logparser and tail inputs.
+- Add timeout option for Kubernetes (`kubernetes`) input plugin.
+- Add support for timing sums in statsd input plugin.
+- Add resource limit monitoring to Procstat (`procstat`) input plugin.
+- Add support for k8s service DNS discovery to Prometheus (`prometheus`) input plugin.
+- Add configurable metrics endpoint to the Prometheus Client (`prometheus_client`) output plugin.
+- Add support for NSQLookupd to `nsq_consumer`.
+- Add configurable separator for metrics and fields in OpenTSDB (`opentsdb`) output plugin.
+- Add support for the rollbar occurrence webhook event.
+- Add extra wired tiger cache metrics to `mongodb` input.
+- Collect Docker Swarm service metrics in Docker (`docker`) input plugin.
+- Add cluster health level configuration to Elasticsearch (`elasticsearch`) input plugin.
+- Add ability to limit node stats in Elasticsearch (`elasticsearch`) input plugin.
+- Add UDP IPv6 support to StatsD (`statsd`) input plugin.
+- Use labels in Prometheus Client (`prometheus`) output plugin for string fields.
+- Add support for decimal timestamps to ts-epoch modifier.
+- Add histogram and summary types and use in Prometheus (`prometheus`) plugins.
+- Gather concurrently from snmp agents.
+- Perform DNS lookup before ping and report result.
+- Add instance name option to Varnish (`varnish`) plugin.
+- Add support for SSL settings to Elasticsearch (`elasticsearch`) output plugin.
+- Add modification_time field to Filestat (`filestat`) input plugin.
+- Add systemd unit pid and cgroup matching to Procstat (`procstat`).
+- Use MAX() instead of SUM() for latency measurements in SQL Server (`sqlserver`) input plugin.
+- Add index by week number to Elasticsearch (`elasticsearch`) output plugin.
+- Add support for tags in the index name in Elasticsearch (`elasticsearch`) output plugin.
+- Add slab to mem plugin.
+- Add support for glob patterns in net input plugin.
+- Add option to AMQP (`amqp`) output plugin to publish persistent messages.
+- Support I (idle) process state on procfs+Linux.
+
+### Bug fixes
+
+- Fix webhooks input address-in-use error during reload.
+- Unlock Statsd when stopping to prevent deadlock.
+- Fix cloudwatch output requiring unneeded permissions.
+- Fix prometheus passthrough for existing value types.
+- Always ignore autofs filesystems in disk input.
+- Fail metrics parsing on unescaped quotes.
+- Whitelist allowed char classes for graphite output.
+- Use hexadecimal ids and lowercase names in zipkin input.
+- Fix snmp-tools output parsing with Windows EOLs.
+- Add shadow-utils dependency to rpm package.
+- Use deb-systemd-invoke to restart service.
+- Fix kafka_consumer outside range of offsets error.
+- Fix separation of multiple prometheus_client outputs.
+- Don't add system input uptime_format as a counter.
+
+## v1.4.5 [2017-12-01]
+
+### Bug fixes
+
+- Fix global variable collection when using interval_slow option in MySQL input.
+- Fix error getting net connections info in netstat input.
+- Fix HOST_MOUNT_PREFIX in Docker with disk input.
+
+## v1.4.4 [2017-11-08]
+
+### Bug fixes
+
+- Use schema specified in mqtt_consumer input.
+- Redact Datadog API key in log output.
+- Fix error getting PIDs in netstat input.
+- Support HOST_VAR envvar to locate /var in system input.
+- Use current time if Docker container read time is zero value.
+
+## v1.4.3 [2017-10-25]
+
+### Bug fixes
+
+- Fix container name filters in Docker input.
+- Fix snmpwalk address format in leofs input.
+- Fix case sensitivity issue in SQL Server query.
+- Fix CPU input plugin stuck after suspend on Linux.
+- Fix MongoDB input panic when restarting MongoDB.
+- Preserve URL path prefix in InfluxDB output.
+- Fix TELEGRAF_OPTS expansion in systemd service unit.
+- Remove warning when JSON contains null value.
+- Fix ACL token usage in consul input plugin.
+- Fix unquoting error with Tomcat 6.
+- Fix syscall panic in diskio on some Linux systems.
+
+## v1.4.2 [2017-10-10]
+
+### Bug fixes
+
+- Fix error if int larger than 32-bit in `/proc/vmstat`.
+- Fix parsing of JSON with a UTF-8 BOM in `httpjson`.
+- Allow JSON data format to contain zero metrics.
+- Fix format of connection_timeout in `mqtt_consumer`.
+- Fix case sensitivity error in SQL Server input.
+- Add support for proxy environment variables to `http_response`.
+- Add support for standard proxy env vars in outputs.
+- Fix panic in CPU input if number of CPUs changes.
+- Use chunked transfer encoding in InfluxDB output.
+
+## v1.4.1 [2017-09-26]
+
+### Bug fixes
+
+- Fix MQTT input exiting if broker is not available on startup.
+- Fix optional field value conversions in fluentd input.
+- Whitelist allowed char classes for opentsdb output.
+- Fix counter and gauge metric types.
+- Fix skipped line with empty target in iptables.
+- Fix duplicate keys in perf counters sqlserver query.
+- Fix panic in statsd p100 calculation.
+- Fix arm64 packages containing 32-bit executable.
+
+## v1.4.0 [2017-09-05]
+
+### Release Notes
+
+- The `kafka_consumer` input has been updated to support Kafka 0.9 and
+  above style consumer offset handling. The previous version of this plugin
+  supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
+  plugin.
+- In the `aerospike` input the `node_name` field has been changed to be a tag
+  for both the `aerospike_node` and `aerospike_namespace` measurements.
+- The default prometheus_client port has been changed to 9273.
+
+### New plugins
+
+- fail2ban
+- fluentd
+- histogram
+- minecraft
+- openldap
+- salesforce
+- tomcat
+- win_services
+- zipkin
+
+### Features
+
+- Add Kafka 0.9+ consumer support.
+- Add support for self-signed certs to InfluxDB input plugin.
+- Add TCP listener for statsd input.
+- Add whitelisted Docker container environment variables as tags.
+- Add timeout option to IPMI sensor plugin.
+- Add support for an optional SSL/TLS configuration to Nginx input plugin.
+- Add timezone support for logparser timestamps.
+- Add result_type field for http_response input.
+- Add include/exclude filters for docker containers.
+- Add secure connection support to graphite output.
+- Add min/max response time on linux/darwin to ping.
+- Add HTTP Proxy support to influxdb output.
+- Add standard SSL options to mysql input.
+- Add input plugin for fail2ban.
+- Support HOST_PROC in processes and linux_sysctl_fs inputs.
+- Add Minecraft input plugin.
+- Add support for RethinkDB 1.0 handshake protocol.
+- Add optional usage_active and time_active CPU metrics.
+- Change default prometheus_client port.
+- Add fluentd input plugin.
+- Add result_type field to net_response input plugin.
+- Add read timeout to socket_listener.
+- Add input plugin for OpenLDAP.
+- Add network option to dns_query.
+- Add redis_version field to redis input.
+- Add tls options to docker input.
+- Add histogram aggregator plugin.
+- Add Zipkin input plugin.
+- Add Windows Services input plugin.
+- Add path tag to logparser containing path of logfile.
+- Add Salesforce input plugin.
+- Add option to run varnish under sudo.
+- Add weighted_io_time to diskio input.
+- Add gzip content-encoding support to influxdb output.
+- Allow using system plugin in Windows.
+- Add Tomcat input plugin.
+- HTTP headers can be added to InfluxDB output.
+
+### Bug fixes
+
+- Improve logging of errors in Cassandra input.
+- Set `db_version` to 0 if query version fails.
+- Fix SQL Server input to work with case-sensitive server collation.
+- Ensure systemd does not see all shutdowns as failures.
+- Reuse transports in input plugins.
+- Fix processes input failing with `no such process`.
+- Fix multiple plugin loading in win_perf_counters.
+- MySQL input: log and continue on field parse error.
+- Fix timeout option in Windows ping input sample configuration.
+- Fix Kinesis output plugin in govcloud.
+- Fix Aerospike input adding all nodes to a single series.
+- Improve Prometheus Client output documentation.
+- Display error message if prometheus output fails to listen.
+- Fix elasticsearch output content type detection warning.
+- Prevent possible deadlock when using aggregators.
+- Fix combined tagdrop/tagpass filtering.
+- Fix filtering when both pass and drop match an item.
+- Only report cpu usage for online cpus in docker input.
+- Start first aggregator period at startup time.
+- Fix panic in logparser if file cannot be opened.
+- Default to localhost if zookeeper has no servers set.
+- Fix docker memory and cpu reporting in Windows.
+- Allow iptable entries with trailing text.
+- Sanitize password from couchbase metric.
+- Converge to typed value in prometheus output.
+- Skip compilation of logparser and tail on solaris.
+- Discard logging from tail library.
+- Remove log message on ping timeout.
+- Don't retry points beyond retention policy.
+- Don't start Telegraf on install in Amazon Linux.
+- Enable hddtemp input on all platforms.
+- Escape backslash within string fields.
+- Fix parsing of SHM remotes in ntpq input.
+- Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
+- Fix NSQ input plugin when used with version 1.0.0-compat.
+- Add CloudWatch metric constraint validation.
+- Skip non-numerical values in graphite format.
+- Fix panic when handling string fields with escapes.
+
+## v1.3.5 [2017-07-26]
+
+### Bug fixes
+
+- Fix issue where prometheus output could not be reloaded.
+- Fix filestat reporting exists when unable to list directory.
+- Fix ntpq parse issue when using dns_lookup.
+- Fix panic when `agent.interval = "0s"`.
+
+## v1.3.4 [2017-07-12]
+
+### Bug fixes
+
+- Fix handling of escape characters within fields.
+- Fix chrony plugin not tracking system time offset.
+- Do not allow metrics with trailing slashes.
+- Prevent Write from being called concurrently.
+
+## v1.3.3 [2017-06-28]
+
+### Bug fixes
+
+- Allow DOS line endings in tail and logparser.
+- Remove label value sanitization in prometheus output.
+- Fix bug parsing default timestamps with modified precision.
+- Fix panic in elasticsearch input if master cannot be determined.
+
+## v1.3.2 [2017-06-14]
+
+### Bug fixes
+
+- Fix InfluxDB UDP metric splitting.
+- Fix mongodb/leofs URLs without scheme.
+- Fix inconsistent label dimensions in prometheus output.
+
+## v1.3.1 [2017-05-31]
+
+### Bug fixes
+
+- Fix sqlserver input to work with case-sensitive server collation.
+- Reuse transports in input plugins.
+- Fix process input failing with `no such process`.
+- Fix InfluxDB output database quoting.
+- Fix net input on older Linux kernels.
+- Fix panic in mongo input.
+- Fix length calculation of split metric buffer.
+
+## v1.3.0 [2017-05-09]
+
+#### Changes to the Windows ping plugin
+
+Users of the Windows [ping plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ping) will need to drop or migrate their measurements to continue using the plugin.
+The reason for this is that the Windows plugin was outputting a different type than the Linux plugin.
+This made it impossible to use the `ping` plugin for both Windows and Linux machines.
+
+#### Changes to the Ceph plugin
+
+For the [Ceph plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ceph), the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.
+
+Telegraf < 1.3:
+
+```
+# field_name             value
+active+clean             123
+active+clean+scrubbing   3
+```
+
+Telegraf >= 1.3:
+
+```
+# field_name  value  tag
+count         123    state=active+clean
+count         3      state=active+clean+scrubbing
+```
+
+#### Rewritten Riemann plugin
+
+The [Riemann output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann) has been rewritten
+and the [previous riemann plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann_legacy) is _incompatible_ with the new one.
+The reasons for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878).
+The previous Riemann output will still be available using `outputs.riemann_legacy` if needed, but that will eventually be deprecated.
+It is highly recommended that all users migrate to the new Riemann output plugin.
+
+#### New Socket Listener and Socket Writer plugins
+
+Generic [Socket Listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer) plugins have been implemented for receiving and sending UDP, TCP, unix, & unix-datagram data.
+These plugins will replace [udp_listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/udp_listener) and [tcp_listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/tcp_listener), which are still available but will be deprecated eventually.
+
+### Features
+
+- Add SASL options for the [Kafka output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/kafka).
+- Add SSL configuration for [HAproxy input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/haproxy).
+- Add the [Interrupts input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/interrupts).
+- Add generic [Socket Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [socket writer output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer).
+- Extend the [HTTP Response input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http_response) to support searching for a substring in response. Return 1 if found, else 0.
+- Add userstats to the [MySQL input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql).
+- Add more InnoDB metrics to the [MySQL input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql).
+- For the [Ceph input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ceph), `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag.
+- Use own client for improved throughput and fewer allocations in the [InfluxDB output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/influxdb).
+- Keep `-config-directory` when running as Windows service.
+- Rewrite the [Riemann output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann).
+- Add support for name templates and udev tags to the [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/diskio/README.md). +- Add integer metrics for [Consul](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/consul) check health state. +- Add lock option to the [IPtables input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/iptables). +- Support [ipmi_sensor input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ipmi_sensor) querying local ipmi sensors. +- Increment gather_errors for all errors emitted by inputs. +- Use the official docker SDK. +- Add [AMQP consumer input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/amqp_consumer). +- Add pprof tool. +- Support DEAD(X) state in the [system input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/system). +- Add support for [MongoDB](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mongodb) client certificates. +- Support adding [SNMP](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/snmp) table indexes as tags. +- Add [Elasticsearch 5.x output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/elasticsearch). +- Add json timestamp units configurability. +- Add support for Linux sysctl-fs metrics. +- Support to include/exclude docker container labels as tags. +- Add [DMCache input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/dmcache). +- Add support for precision in [HTTP Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http_listener). +- Add `message_len_max` option to the [Kafka consumer input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/kafka_consumer). +- Add [collectd parser](https://archive.docs.influxdata.com/telegraf/v1.3/concepts/data_formats_input/#collectd). +- Simplify plugin testing without outputs. +- Check signature in the [GitHub webhook input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/webhooks/github). +- Add [papertrail](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/webhooks/papertrail) support to webhooks. +- Change [jolokia input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jolokia) to use bulk requests. +- Add [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/diskio/README.md) for Darwin. +- Add use_random_partitionkey option to the [Kinesis output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/kinesis). +- Add tcp keep-alive to [Socket Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer). +- Add [Kapacitor input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/kapacitor). +- Use Go (golang) 1.8.1. +- Add documentation for the [RabbitMQ input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/rabbitmq). +- Make the [Logparser input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/logparser) check for newly-created files. + +### Bug fixes + +- Allow `@` symbol in password for the ipmi_sensor plugin. 
+- Fix arithmetic overflow error converting numeric to data type int in SQL Server input.
+- Fix flush jitter inhibiting metric collection.
+- Add missing fields for HAproxy input.
+- Handle null startTime for stopped pods in the Kubernetes input.
+- Fix cpu input panic when `/proc/stat` is empty.
+- Fix telegraf swallowing panics in `--test` mode.
+- Create pidfile with 644 permissions & defer file deletion.
+- Fix install/remove of telegraf on non-systemd Debian/Ubuntu systems.
+- Fix reloading telegraf freezing prometheus output.
+- Fix error on InfluxDB output when tag value is empty.
+- Fix negative `buffer_size` field values from "internal" plugin.
+- Fix missing error handling in the MySQL plugin that led to a segmentation violation.
+- Fix type conflict in Windows ping plugin.
+- Fix logparser issue with regexp containing lookahead.
+- Fix Telegraf crash in LoadDirectory on 0600 files.
+- Iptables input: better document that rules without a comment are ignored.
+- Fix win_perf_counters capping values at 100.
+- Export `Ipmi.Path` so it can be set by config.
+- Remove warning when parsing empty content.
+- Update default value for Cloudwatch rate limit.
+- Create `/etc/telegraf/telegraf.d` directory in tarball.
+- Return error on unsupported serializer data format.
+- Fix Windows Performance Counters multi-instance identifier.
+- Add write timeout to Riemann output.
+- Fix timestamp parsing on prometheus plugin.
+- Fix deadlock when output cannot write.
+- Fix connection leak in postgresql.
+- Set default measurement name for snmp input.
+- Improve performance of diskio with many disks.
+- Fix internal input plugin using the wrong units for `heap_objects`.
+- Fix ipmi_sensor config being shared between all plugin instances.
+- Fix network statistics not collected when system has alias interfaces.
+- Fix Sysstat plugin needing `LANG=C` or similar locale.
+- Fix file output closing standard streams on reload.
+- Fix AMQP output disconnect blocking all outputs.
+- Improve documentation for redis input plugin.
+
+## v1.2.1 [2017-02-01]
+
+### Bug fixes
+
+- Fix segfault on nil metrics with InfluxDB output.
+- Fix negative number handling.
+
+### Features
+
+- Go (golang) version update 1.7.4 -> 1.7.5.
+
+## v1.2 [2017-01-24]
+
+### Release Notes
+
+- The StatsD plugin will now default all `delete_` config options to `true`. This
+will change the default behavior for users who were not specifying these parameters
+in their config file.
+
+- The StatsD plugin will also no longer save its state on a service reload.
+Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
+The reason for this is that saving the state in a global variable is not
+thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
+and this creates issues if users want to define multiple instances
+of the statsd plugin. Saving state on reload may be considered in the future,
+but this would need to be implemented at a higher level and applied to all
+plugins, not just statsd.
+
+### Features
+
+- Fix improper calculation of CPU percentages.
+- Use RFC3339 timestamps in log output.
+- Non-default HTTP timeouts for RabbitMQ plugin.
+- "Discard" output plugin added, primarily for testing purposes.
+- The JSON parser can now parse an array of objects using the same configuration.
+- Option to use device name rather than path for reporting disk stats.
+- Telegraf "internal" plugin for collecting stats on itself.
+- Update Go version to 1.7.4.
+- Support a `metric.Split` function.
+- Document Elasticsearch "shield" (basic auth) support.
+- Fix over-querying of cloudwatch metrics.
+- OpenTSDB basic auth support.
+- RabbitMQ Connection metrics.
+- HAProxy session limit metric.
+- Accept strings for StatsD sets.
+- Change StatsD default "reset" behavior.
+- Enable setting ClientID in MQTT output.
+- MongoDB input plugin: improve state data.
+- Ping input: add standard deviation field.
+- Add GC pause metric to InfluxDB input plugin.
+- Add `response_timeout` property to prometheus input plugin.
+- Pull github.com/lxn/win's PDH wrapper into Telegraf.
+- Support negative statsd counters.
+- Elasticsearch cluster stats support.
+- Change Amazon Kinesis output plugin to use the built-in serializer plugins.
+- Hide username/password from elasticsearch error log messages.
+- Configurable HTTP timeouts in Jolokia plugin.
+- Allow changing jolokia attribute delimiter.
+
+### Bug fixes
+
+- Fix the Value data format not trimming null characters from input.
+- Fix Windows `.net` plugin.
+- Cache & expire metrics for delivery to prometheus.
+- Fix potential panic in aggregator plugin metric maker.
+- Add optional ability to define PID as a tag.
+- Fix win_perf_counters not gathering non-English counters.
+- Fix panic when file stat info cannot be collected due to permissions or other issue(s).
+- Ensure Graylog output sets the `short_message` field.
+- Fix hddtemp to always put the value in the `temperature` field.
+- Properly collect nested jolokia struct data.
+- Fix puppetagent inputs plugin to support string for config variable.
+- Fix docker input plugin tags when registry has port.
+- Fix tail input when reading from a pipe.
+- Fix MongoDB plugin always showing 0 replication lag.
+- Consul plugin: add check_id as a tag in metrics to avoid overwrites.
+- Partial fix: logparser CLF pattern with IPv6 addresses.
+- Fix thread-safety when using multiple instances of the statsd input plugin.
+- Docker input: fix interface conversion panic.
+- SNMP: ensure proper context is present on error messages.
+- OpenTSDB: add tcp:// prefix if no scheme provided.
+- Influx parser: parse line-protocol without newlines.
+- InfluxDB output: fix field type conflict blocking output buffer.
+
+## v1.1.2 [2016-12-12]
+
+### Bug fixes
+
+- Make snmptranslate not required when using numeric OID.
+- Add a global snmp translation cache.
+
+## v1.1.1 [2016-11-14]
+
+### Bug fixes
+
+- Fix issue parsing toml durations with single quotes.
+
+## v1.1.0 [2016-11-07]
+
+### Release Notes
+
+- Telegraf now supports two new types of plugins: processors & aggregators.
+
+- On systemd Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
+On most systems, the logs will be directed to the systemd journal and can be
+accessed by `journalctl -u telegraf.service`. Consult the systemd journal
+documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/release-1.8/etc/telegraf.conf#L70)
+available in 1.1, which will allow users to easily configure telegraf to
+continue sending logs to /var/log/telegraf/telegraf.log.
+
+### Features
+
+- Processor & Aggregator plugin support.
+- Add tags in the graylog output plugin.
+- Telegraf systemd service, log to journal.
+- Allow numeric and non-string values for tag_keys.
+- Add Gauge and Counter metric types.
+- Remove carriage returns from exec plugin output on Windows.
+- Elasticsearch input: configurable timeout.
+- Massage metric names in Instrumental output plugin.
+- Apache Mesos improvements.
+- Add Ceph Cluster Performance Statistics.
+- Add ability to configure response_timeout in httpjson input.
+- Add additional redis metrics.
+- Add capability to send metrics through HTTP API for OpenTSDB.
+- Add iptables input plugin.
+- Add filestack webhook plugin.
+- Add server hostname to each Docker measurement.
+- Add NATS output plugin.
+- Add HTTP service listener input plugin.
+- Add database blacklist option for Postgresql.
+- Add Docker container state metrics to Docker input plugin output.
+- Add support to SNMP for IP & MAC address conversion.
+- Add support to SNMP for OID index suffixes.
+- Change default arguments for SNMP plugin.
+- Apache Mesos input plugin: remove very high-cardinality mesos-task metrics.
+- Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
+- HAProxy plugin socket glob matching.
+- Add Kubernetes plugin for retrieving pod metrics.
+
+### Bug fixes
+
+- Fix NATS plugin reconnection logic.
+- Set required default values in udp_listener & tcp_listener.
+- Fix toml unmarshal panic in Duration objects.
+- Fix handling of non-string values for JSON keys listed in tag_keys.
+- Fix mongodb input panic on version 2.2.
+- Fix statsd scientific notation parsing.
+- Fix Sensors plugin `strconv.ParseFloat: parsing "": invalid syntax` error.
+- Fix prometheus_client reload panic.
+- Fix Apache Kafka consumer panic when nil error is returned down errs channel.
+- Speed up statsd parsing.
+- Fix powerdns integer parse error handling.
+- Fix varnish plugin defaults not being used.
+- Fix Windows glob paths.
+- Fix issue loading config directory on Windows.
+- Fix Windows remote management interactive service.
+- SQLServer: fix issue when case-sensitive collation is activated.
+- Fix huge allocations in http_listener when dealing with huge payloads.
+- Fix translating SNMP fields not in MIB.
+- Fix SNMP emitting empty fields.
+- Fix SQL Server waitstats truncation bug.
+- Fix logparser common log format: numbers in ident.
+- Fix JSON Serialization in OpenTSDB output.
+- Fix Graphite template ordering, use most specific.
+- Fix snmp table field initialization for non-automatic table.
+- Fix cgroups path being parsed as metric.
+- Fix phpfpm fcgi client panic when URL does not exist.
+- Fix config file parse error logging.
+- Delete nil fields in the metric maker.
+- Fix MySQL special characters in DSN parsing.
+- Fix odd timeout behavior in ping input.
+- Switch to github.com/kballard/go-shellquote.
+
+## v1.0.1 [2016-09-26]
+
+### Bug fixes
+
+- Prometheus output: fix bug with multi-batch writes.
+- Fix unmarshal of influxdb metrics with null tags.
+- Add configurable timeout to influxdb input plugin.
+- Fix statsd no default value panic.
+
+## v1.0 [2016-09-08]
+
+### Release Notes
+
+**Breaking Change** The SNMP plugin is being deprecated in its current form.
+There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/snmp)
+which fixes many of the issues and confusions
+of its predecessor. For users wanting to continue to use the deprecated SNMP
+plugin, you will need to change your config file from `[[inputs.snmp]]` to
+`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
+backwards-compatible.
+
+**Breaking Change**: Aerospike main server node measurements have been renamed
+`aerospike_node`. Aerospike namespace measurements have been renamed to
+`aerospike_namespace`. They will also now be tagged with the `node_name`
+that they correspond to. This has been done to differentiate measurements
+that pertain to node vs. namespace statistics.
+
+**Breaking Change**: users of github_webhooks must change to the new
+`[[inputs.webhooks]]` plugin.
+
+This means that the default github_webhooks config:
+
+```
+# A Github Webhook Event collector
+[[inputs.github_webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+```
+
+should now look like:
+
+```
+# A Webhooks Event collector
+[[inputs.webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+
+  [inputs.webhooks.github]
+    path = "/"
+```
+
+- Telegraf now supports being installed as an official Windows service,
+which can be installed via
+`> C:\Program Files\Telegraf\telegraf.exe --service install`
+
+- `flush_jitter` behavior has been changed. The random jitter will now be
+evaluated at every flush interval, rather than once at startup. This makes it
+consistent with the behavior of `collection_jitter`.
+
+- PostgreSQL plugins now handle oid and name typed columns seamlessly; previously they were ignored/skipped.
+
+### Features
+
+- postgresql_extensible now handles name and oid types correctly.
+- Separate container_version from container_image tag.
+- Support setting per-device and total metrics for Docker network and blockio.
+- MongoDB input plugin: add per-DB stats from db.stats().
+- Add tls support for certs to RabbitMQ input plugin.
+- Add Webhooks input plugin.
+- Add Rollbar webhook plugin.
+- Add Mandrill webhook plugin.
+- docker-machine/boot2docker no longer required for unit tests.
+- Add cgroup input plugin.
+- Add input plugin for consuming metrics from NSQD.
+- Add ability to read Redis from a socket.
+- **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override.
+- Fetch Galera status metrics in MySQL.
+- Aerospike plugin refactored to use official client library.
+- Add measurement name arg to logparser plugin.
+- logparser: change resp_code from a field to a tag.
+- Implement support for fetching hddtemp data.
+- statsd: do not log every dropped metric.
+- Add precision rounding to all metrics on collection.
+- Add support for Tengine.
+- Add Logparser input plugin for parsing grok-style log patterns.
+- Elasticsearch: now supports connecting to Elasticsearch via SSL.
+- Add graylog input plugin.
+- Add Consul input plugin.
+- Add conntrack input plugin.
+- Add vmstat input plugin.
+- Standardize AWS credentials evaluation & wildcard CloudWatch dimensions.
+- Add SSL config options to http_response plugin.
+- Graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
+- Make DNS lookups for chrony configurable.
+- Allow wildcard filtering of varnish stats.
+- Support for glob patterns in exec plugin commands configuration.
+- RabbitMQ input: make url parameter optional by using DefaultURL (`http://localhost:15672`) if not specified.
+- Limit AWS GetMetricStatistics requests to 10 per second.
+- RabbitMQ/Apache/InfluxDB inputs: make url(s) parameter optional by using reasonable input defaults if not specified.
+- Refactor of flush_jitter argument.
+- Add inactive & active memory to mem plugin.
+- Official Windows service.
+- Fork sensors command to remove C package dependency.
+- Add a new SNMP plugin.
+
+### Bug fixes
+
+- Fix `make windows` build target.
+- Fix error race conditions and partial failures.
+- nstat: fix inaccurate config panic.
+- jolokia: fix handling multiple multi-dimensional attributes.
+- Fix prometheus character sanitizing. Sanitize more win_perf_counters characters.
+- Add diskio io_time to FreeBSD & report timing metrics as ms (as Linux does).
+- Fix covering Amazon Linux for post remove flow.
+- procstat missing fields: read/write bytes & count.
+- diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
+- nil metrics panic fix.
+- Fix data race in apache input plugin.
+- Add `read_repairs` statistics to riak plugin.
+- Fix memory/connection leak in Prometheus input plugin.
+- Trim BOM from config file for Windows support.
+- Prometheus client output panic on service reload.
+- Prometheus parser, protobuf format header fix.
+- Prometheus output, metric refresh and caching fixes.
+- Panic fix for multiple graphite outputs under very high load.
+- Instrumental output has better reconnect behavior.
+- Remove PID from procstat plugin to fix cardinality issues.
+- Cassandra input: version 2.x "column family" fix.
+- Shared WaitGroup in Exec plugin.
+- logparser: honor modifiers in "pattern" config.
+- logparser: error and exit on file permissions/missing errors.
+- Allow the user to specify the full path for HAProxy stats.
+- Fix Redis URL; an extra "tcp://" was added.
+- Fix exec plugin panic when using single binary.
+- Fixed incorrect prometheus metrics source selection.
+- Set default Zookeeper chroot to empty string.
+- Fix overall ping timeout to be calculated based on per-ping timeout.
+- Change "default" retention policy to "".
+- Graphite output mangling '%' character.
+- Prometheus input plugin now supports x509 certs authentication.
+- Fix systemd service.
+- Fix influxdb n_shards counter.
+- Fix potential kernel plugin integer parse error.
+- Fix potential influxdb input type assertion panic.
+- Still send processes metrics if a process exited during metric collection.
+- disk plugin panic when usage grab fails.
+- Removed leaked "database" tag on redis metrics.
+- Processes plugin: fix potential error with /proc/net/stat directory.
+- Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
+- Remove IF NOT EXISTS from influxdb output database creation.
+- Fix quoting with text values in postgresql_extensible plugin.
+- Fix win_perf_counter "index out of range" panic.
+- Fix ntpq panic when field is missing.
+- Sanitize graphite output field names.
+- Fix MySQL plugin not sending 0 value fields.
diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml
index 21e55e5db..cd87d4ed9 100644
--- a/data/telegraf_plugins.yml
+++ b/data/telegraf_plugins.yml
@@ -175,6 +175,13 @@ input:
     introduced: 1.14.0
     tags: [linux, macos, windows, networking]
 
+  - name: AWS CloudWatch Metric Streams
+    id: cloudwatch_metric_streams
+    description: |
+      The CloudWatch Metric Streams plugin is a service input plugin that listens for metrics sent via HTTP and performs the required processing for metric streams from AWS.
+    introduced: 1.24.0
+    tags: [linux, macos, windows, aws]
+
   - name: Azure Storage Queue
     id: azure_storage_queue
     description: |
@@ -1031,6 +1038,13 @@ input:
     introduced: 0.1.5
     tags: [linux, macos, windows, systems, data-stores]
 
+  - name: Linux CPU
+    id: linux_cpu
+    description: |
+      The Linux CPU input plugin gathers CPU metrics exposed on Linux-based systems.
+    introduced: 1.24.0
+    tags: [linux, systems]
+
   - name: Linux Sysctl FS
     id: linux_sysctl_fs
     description: |
@@ -1358,6 +1372,14 @@ input:
     introduced: 0.13.1
     tags: [linux, macos, windows, networking, systems]
 
+  - name: NSDP
+    id: nsdp
+    description: |
+      The NSDP input plugin gathers switch network statistics.
+    introduced: 1.24.0
+    tags: [linux, macos, windows, networking, systems]
+    external: true
+
   - name: NTPq
     id: ntpq
     description: |
@@ -1806,6 +1828,13 @@ input:
     introduced: 0.2.0
     tags: [linux, macos, windows, applications]
 
+  - name: Supervisor
+    id: supervisor
+    description: |
+      The Supervisor input plugin gathers information about processes running under Supervisor using the XML-RPC API.
+    introduced: 1.24.0
+    tags: [linux, macos, windows]
+
   - name: Suricata
     id: suricata
     description: |
@@ -1970,6 +1999,13 @@ input:
     introduced: 1.5.0
     tags: [linux, macos, windows, networking]
 
+  - name: UPSD
+    id: upsd
+    description: |
+      The UPSD input plugin reads data from one or more uninterruptible power supplies (UPS) via an upsd daemon using its NUT network protocol.
+    introduced: 1.24.0
+    tags: [linux, macos, windows, networking]
+
   - name: uWSGI
     id: uwsgi
     description: |
@@ -2431,6 +2467,13 @@ output:
     introduced: 0.1.9
     tags: [linux, macos, windows, data-stores]
 
+  - name: PostgreSQL
+    id: postgresql
+    description: |
+      The PostgreSQL output plugin writes metrics to a PostgreSQL (or compatible) database.
+    introduced: 1.24.0
+    tags: [linux, macos, windows]
+
   - name: Prometheus Client
     id: prometheus_client
     description: |
@@ -2439,6 +2482,13 @@ output:
     introduced: 0.2.1
     tags: [linux, macos, windows, applications]
 
+  - name: RedisTimeSeries
+    id: redistimeseries
+    description: |
+      The RedisTimeSeries output plugin writes metrics to the RedisTimeSeries server.
+    introduced: 1.24.0
+    tags: [linux, macos, windows, networking]
+
   - name: Riemann
     id: riemann
     description: |
@@ -2493,6 +2543,13 @@ output:
     introduced: 1.9.0
     tags: [linux, macos, windows, cloud]
 
+  - name: Stomp (ActiveMQ)
+    id: stomp
+    description: |
+      The Stomp (ActiveMQ) output plugin writes metrics to an [ActiveMQ broker](https://activemq.apache.org/) using [STOMP](http://stomp.github.io).
+    introduced: 1.24.0
+    tags: [linux, macos, windows]
+
   - name: SQL
     id: sql
     description: |

From 260ed64b3bb9cfad21cd4b177511b1d77e6bd997 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Thu, 15 Sep 2022 11:44:08 -0500
Subject: [PATCH 02/17] fix(api): Check all nodes for a description field
 (#4458)

- Replace named nodes in decorator with the any key.
- If the node description field is a string, replace URLs.
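For context on the Telegraf 1.24 plugin entries registered in `data/telegraf_plugins.yml` above: by Telegraf convention, each entry's `id` maps to a `[[inputs.<id>]]` or `[[outputs.<id>]]` section in `telegraf.conf`. The fragment below is a minimal, hypothetical sketch and not part of this patch; the section names follow that convention using the IDs listed above, and the `connection` string is an assumed placeholder value rather than an option documented here.

```toml
# Hypothetical telegraf.conf fragment pairing two plugins introduced in 1.24.
# Section names derive from the plugin IDs registered above (linux_cpu, postgresql).

# Gather CPU metrics exposed on Linux-based systems.
[[inputs.linux_cpu]]

# Write metrics to a PostgreSQL (or compatible) database.
[[outputs.postgresql]]
  ## Assumed example DSN -- replace with your own server and credentials.
  connection = "postgres://telegraf:example-password@localhost:5432/metrics"
```

An empty plugin section enables the plugin with its default settings; options only need to be set where the defaults don't fit.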
--- .../plugins/decorators/replace-shortcodes.js | 75 ++----------------- 1 file changed, 5 insertions(+), 70 deletions(-) diff --git a/api-docs/openapi/plugins/decorators/replace-shortcodes.js b/api-docs/openapi/plugins/decorators/replace-shortcodes.js index 8d6100faa..17d95f1cb 100644 --- a/api-docs/openapi/plugins/decorators/replace-shortcodes.js +++ b/api-docs/openapi/plugins/decorators/replace-shortcodes.js @@ -14,78 +14,13 @@ function replaceDocsUrl(field) { /** @type {import('@redocly/openapi-cli').OasDecorator} */ function docsUrl() { return { - DefinitionRoot: { - Example: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - }, - }, - ExternalDocs: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - }, - }, - Header: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - }, - }, - Info: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - }, - }, - Operation: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - }, - }, - Parameter: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); + any: { + leave(node, ctx) { + if(node.description && typeof(node.description) === 'string') { + node.description = replaceDocsUrl(node.description); } }, - PathItem: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - RequestBody: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - Response: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - Schema: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - SecurityScheme: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - Server: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - Tag: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - }, - XCodeSample: { - leave(node, ctx) { - node.description = replaceDocsUrl(node.description); - } - } - } + }, } } From c4c5d9d1e90549063c61b610c3d2b4ec52a988c0 Mon Sep 17 00:00:00 2001 From: Adam 2 Date: Thu, 15 Sep 2022 15:13:50 -0700 Subject: [PATCH 03/17] Update native-subscriptions.md (#4461) Update spelling. --- .../influxdb/cloud/write-data/no-code/native-subscriptions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/cloud/write-data/no-code/native-subscriptions.md b/content/influxdb/cloud/write-data/no-code/native-subscriptions.md index 7d20e7d63..94bab3189 100644 --- a/content/influxdb/cloud/write-data/no-code/native-subscriptions.md +++ b/content/influxdb/cloud/write-data/no-code/native-subscriptions.md @@ -76,7 +76,7 @@ JSON parsing is faster and more efficient than string parsing. We recommend usin {{% tab-content %}} Use line protocol to write data into InfluxDB. Line protocol doesn't require any parsing or configuration. 
-- Select a **Timepstamp precision** from the dropdown menu: +- Select a **Timestamp precision** from the dropdown menu: - **MS**: Milliseconds - **S**: Seconds - **US**: Microseconds From 5a0bbbbbe7382c83473f4170f48fafca52929608 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 15 Sep 2022 16:16:11 -0600 Subject: [PATCH 04/17] hotfix: update sql driverName param, closes influxdata/DAR#328 --- content/flux/v0.x/query-data/sql/_index.md | 2 +- content/flux/v0.x/write-data/sql/_index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/content/flux/v0.x/query-data/sql/_index.md b/content/flux/v0.x/query-data/sql/_index.md index a32b9081c..e6655ef1d 100644 --- a/content/flux/v0.x/query-data/sql/_index.md +++ b/content/flux/v0.x/query-data/sql/_index.md @@ -105,7 +105,7 @@ Given the following **example_table** in a MySQL database: import "sql" sql.from( - driver: "mysql", + driverName: "mysql", dataSourceName: "username:passwOrd@tcp(localhost:3306)/db", query: "SELECT ID, Name FROM example_table", ) diff --git a/content/flux/v0.x/write-data/sql/_index.md b/content/flux/v0.x/write-data/sql/_index.md index 506e0f7a6..be4914b27 100644 --- a/content/flux/v0.x/write-data/sql/_index.md +++ b/content/flux/v0.x/write-data/sql/_index.md @@ -112,7 +112,7 @@ import "sql" data |> sql.to( - driver: "mysql", + driverName: "mysql", dataSourceName: "username:passwOrd@tcp(localhost:3306)/db", table: "exampleTable" ) From 10bc089d2defc799b209a3fb8a9b1018edd6f1b3 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 16 Sep 2022 13:00:24 -0600 Subject: [PATCH 05/17] hotfix: fix telegraf nav, add influxdbu banner to resources landing --- content/resources/_index.md | 4 +++- content/telegraf/v1.23/install.md | 2 +- content/telegraf/v1.23/metrics.md | 2 +- content/telegraf/v1.24/install.md | 2 +- content/telegraf/v1.24/metrics.md | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/content/resources/_index.md b/content/resources/_index.md index 3a6d277d3..9303928b3 100644 --- a/content/resources/_index.md +++ b/content/resources/_index.md @@ -7,4 +7,6 @@ menu: name: Additional resources --- -{{< children >}} \ No newline at end of file +{{< children >}} + +{{< influxdbu >}} \ No newline at end of file diff --git a/content/telegraf/v1.23/install.md b/content/telegraf/v1.23/install.md index e6a6a34bf..67507e07c 100644 --- a/content/telegraf/v1.23/install.md +++ b/content/telegraf/v1.23/install.md @@ -2,7 +2,7 @@ title: Install Telegraf description: Install Telegraf on your operating system. menu: - telegraf_1.23: + telegraf_1_23: name: Install weight: 20 diff --git a/content/telegraf/v1.23/metrics.md b/content/telegraf/v1.23/metrics.md index ea64c820d..78a34f6b9 100644 --- a/content/telegraf/v1.23/metrics.md +++ b/content/telegraf/v1.23/metrics.md @@ -2,7 +2,7 @@ title: Telegraf metrics description: Telegraf metrics are internal representations used to model data during processing and are based on InfluxDB's data model. Each metric component includes the measurement name, tags, fields, and timestamp. menu: - telegraf_1.23: + telegraf_1_23: name: Metrics weight: 10 parent: Concepts diff --git a/content/telegraf/v1.24/install.md b/content/telegraf/v1.24/install.md index e6a6a34bf..f267a41a1 100644 --- a/content/telegraf/v1.24/install.md +++ b/content/telegraf/v1.24/install.md @@ -2,7 +2,7 @@ title: Install Telegraf description: Install Telegraf on your operating system. 
menu: - telegraf_1.23: + telegraf_1_24: name: Install weight: 20 diff --git a/content/telegraf/v1.24/metrics.md b/content/telegraf/v1.24/metrics.md index ea64c820d..6f16c559d 100644 --- a/content/telegraf/v1.24/metrics.md +++ b/content/telegraf/v1.24/metrics.md @@ -2,7 +2,7 @@ title: Telegraf metrics description: Telegraf metrics are internal representations used to model data during processing and are based on InfluxDB's data model. Each metric component includes the measurement name, tags, fields, and timestamp. menu: - telegraf_1.23: + telegraf_1_24: name: Metrics weight: 10 parent: Concepts From 7fcdead0ea61e950440fa19599c69b12bd64ace7 Mon Sep 17 00:00:00 2001 From: peterreg Date: Mon, 19 Sep 2022 14:50:05 -0700 Subject: [PATCH 06/17] Adding influxDB videos for season 7 (#4457) * This adds videos for Seasons 4-6 * Update Intro-to-Client-Libraries.md * Update API-Invocable-Scripts.md * Update Edge-Data-Replication.md * Update Flux-Data-Structure.md * Update Flux-Functions.md * Update Optimizing-Flux-Functions.md * Update Intro-to-Check-&-Notifications.md * Update Checks-&-Notification-in-Action.md * Update Aggregator-&-Processor-Plugins-in-Telegraf.md * Update Using-MQTT-and-InfluxDB-for-IoT.md * Update Basics-of-Geo-temporal-data-and-InfluxDB.md * Update Flux-and-S2-Geometry.md * Update Using-Flux-to-query-geo-temporal-data.md * Update InfluxDB-in-the-IoT-stack.md * Update Architecture-Overview-for-IoT-and-InfluxDB.md * Update Configuring Replications For Downsampling To Cloud.md * Update Retaining-Data-Shape-When-Downsampling-To-Cloud.md * Update Using-Scripts-in-InfluxDB.md * Update Invocable-Scripts-and-Tasks.md * Update Invocable-Scripts-for-Alerting.md * Update Batching-Data-with-the-Python-Client-Library.md * Update Querying-Data-with-the-Python-Client-Library.md * Update Using-the-InfluxDB-Python-Client-Library-Administrative-API.md * Update Basics-of-Geo-temporal-data-and-InfluxDB.md * Update Basics-of-Geo-temporal-data-and-InfluxDB.md * Update content/resources/videos/API-Invocable-Scripts.md * Update content/resources/videos/Checks-&-Notification-in-Action.md * Update API-Invocable-Scripts.md * Testing Circle ci fix * Edits for videos seasons 4-6 * Resolve conflict duplicate videos * Further revisions to videos seasons 4-6 * Adding Season 7 videos for the Meet the Developer series * Update content/resources/videos/Configuring-and-Parsing-Data-with-InfluxDB-Native-Collector-for-MQTT.md Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> * Update content/resources/videos/Explicit-Schemas-in-InfluxDB.md Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> * Update content/resources/videos/Getting-Started-with-the-Node.js-Client-Library-in-InfluxDB.md Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> * Update content/resources/videos/Intro-to-InfluxDB-Native-Collector.md Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> Co-authored-by: Jason Stirnaman --- ...atching-Data-with-the-Python-Client-Library.md | 2 +- ...ring Replications For Downsampling To Cloud.md | 15 +++++++++++++++ ...ata-with-InfluxDB-Native-Collector-for-MQTT.md | 15 +++++++++++++++ .../videos/Explicit-Schemas-in-InfluxDB.md | 15 +++++++++++++++ ...with-the-Node.js-Client-Library-in-InfluxDB.md | 15 +++++++++++++++ .../videos/Intro-to-InfluxDB-Native-Collector.md | 15 +++++++++++++++ ...uerying-Data-with-the-Python-Client-Library.md | 2 +- 
 ...g-Pandas-Dataframes-in-InfluxDB-with-Python.md | 15 +++++++++++++++
 ...n-to-use-InfluxDB-Native-Collector-for-MQTT.md | 15 +++++++++++++++
 content/resources/videos/_index.md | 3 +++
 10 files changed, 110 insertions(+), 2 deletions(-)
 create mode 100644 content/resources/videos/Configuring Replications For Downsampling To Cloud.md
 create mode 100644 content/resources/videos/Configuring-and-Parsing-Data-with-InfluxDB-Native-Collector-for-MQTT.md
 create mode 100644 content/resources/videos/Explicit-Schemas-in-InfluxDB.md
 create mode 100644 content/resources/videos/Getting-Started-with-the-Node.js-Client-Library-in-InfluxDB.md
 create mode 100644 content/resources/videos/Intro-to-InfluxDB-Native-Collector.md
 create mode 100644 content/resources/videos/Using-Pandas-Dataframes-in-InfluxDB-with-Python.md
 create mode 100644 content/resources/videos/When-to-use-InfluxDB-Native-Collector-for-MQTT.md

diff --git a/content/resources/videos/Batching-Data-with-the-Python-Client-Library.md b/content/resources/videos/Batching-Data-with-the-Python-Client-Library.md
index cee214aaa..b1b123be2 100644
--- a/content/resources/videos/Batching-Data-with-the-Python-Client-Library.md
+++ b/content/resources/videos/Batching-Data-with-the-Python-Client-Library.md
@@ -1,7 +1,7 @@
 ---
 title: Batching Data with the Python Client Library
 description: >
-  If you're already a Python user, InfluxDB's Python client library lets you use a familiar language, to quickly get up-to-speed with InfluxDB. Here, Sunbrye Ly discusses the different options and settings around batching data using the Python client library.
+  If you're already a Python user, InfluxDB's Python client library lets you use a familiar language to quickly get up-to-speed with InfluxDB. Here, Sunbrye Ly discusses the different options and settings around batching data using the Python client library.
 menu:
   resources:
     parent: Videos
diff --git a/content/resources/videos/Configuring Replications For Downsampling To Cloud.md b/content/resources/videos/Configuring Replications For Downsampling To Cloud.md
new file mode 100644
index 000000000..0914b2505
--- /dev/null
+++ b/content/resources/videos/Configuring Replications For Downsampling To Cloud.md
@@ -0,0 +1,15 @@
+---
+title: Configuring Replications For Downsampling To Cloud
+description: >
+  Getting data from the edge to the cloud is often a critical process for IoT and IIoT use cases. Here, Sam Dillard describes how to configure the Edge Data Replication (EDR) feature in InfluxDB, which automates the transfer of data from the edge to the cloud in a reliable, durable way.
+menu:
+  resources:
+    parent: Videos
+weight: 153
+youtubeID: yCComgh-B74
+date: 2022-6-30
+series: [Meet the Developers S6]
+metadata: [Meet the Developer Series]
+---
+
+{{< resources/video-content >}}
diff --git a/content/resources/videos/Configuring-and-Parsing-Data-with-InfluxDB-Native-Collector-for-MQTT.md b/content/resources/videos/Configuring-and-Parsing-Data-with-InfluxDB-Native-Collector-for-MQTT.md
new file mode 100644
index 000000000..38be53acf
--- /dev/null
+++ b/content/resources/videos/Configuring-and-Parsing-Data-with-InfluxDB-Native-Collector-for-MQTT.md
@@ -0,0 +1,15 @@
+---
+title: Configuring and Parsing Data with InfluxDB Native Collector for MQTT
+description: >
+  Setting up the InfluxDB native collector for MQTT is a simple, three-step process. Here, Gary Fowler walks through the configuration process and discusses the different parsing options available.
+menu:
+  resources:
+    parent: Videos
+weight: 162
+youtubeID: uI_HYgx_PIQ
+date: 2022-7-12
+series: [Meet the Developers S7]
+metadata: [Meet the Developer Series]
+---
+
+{{< resources/video-content >}}
diff --git a/content/resources/videos/Explicit-Schemas-in-InfluxDB.md b/content/resources/videos/Explicit-Schemas-in-InfluxDB.md
new file mode 100644
index 000000000..27579a592
--- /dev/null
+++ b/content/resources/videos/Explicit-Schemas-in-InfluxDB.md
@@ -0,0 +1,15 @@
+---
+title: Explicit Schemas in InfluxDB
+description: >
+  InfluxDB is a schemaless database. But what if you *want* a schema for your time series data? Well, you can choose to enforce an explicit schema at the bucket level. Here, Zoe Steinkamp covers the basics of schemas, and how to go about using explicit schemas in InfluxDB.
+menu:
+  resources:
+    parent: Videos
+weight: 163
+youtubeID: MkbhvurkekE
+date: 2022-7-12
+series: [Meet the Developers S7]
+metadata: [Meet the Developer Series]
+---
+
+{{< resources/video-content >}}
diff --git a/content/resources/videos/Getting-Started-with-the-Node.js-Client-Library-in-InfluxDB.md b/content/resources/videos/Getting-Started-with-the-Node.js-Client-Library-in-InfluxDB.md
new file mode 100644
index 000000000..188c54033
--- /dev/null
+++ b/content/resources/videos/Getting-Started-with-the-Node.js-Client-Library-in-InfluxDB.md
@@ -0,0 +1,15 @@
+---
+title: Getting Started with the Node.js Client Library in InfluxDB
+description: >
+  If you use Node.js, the Node.js client library lets you interact with the InfluxDB platform quickly, using a familiar language. Here, Zoe Steinkamp discusses some of the features of the Node.js client library to help you get started building awesome applications with InfluxDB even faster.
+menu:
+  resources:
+    parent: Videos
+weight: 164
+youtubeID: VxQVda-ilIo
+date: 2022-7-12
+series: [Meet the Developers S7]
+metadata: [Meet the Developer Series]
+---
+
+{{< resources/video-content >}}
diff --git a/content/resources/videos/Intro-to-InfluxDB-Native-Collector.md b/content/resources/videos/Intro-to-InfluxDB-Native-Collector.md
new file mode 100644
index 000000000..9dbe1d080
--- /dev/null
+++ b/content/resources/videos/Intro-to-InfluxDB-Native-Collector.md
@@ -0,0 +1,15 @@
+---
+title: Intro to InfluxDB Native Collector
+description: >
+  InfluxDB Native Collector lets you set up direct, cloud-to-cloud connections between web services and InfluxDB to accelerate data collection. Here, Gary Fowler discusses the basics of the Native Collector feature.
+menu:
+  resources:
+    parent: Videos
+weight: 160
+youtubeID: J3bQrL8ihSU
+date: 2022-7-12
+series: [Meet the Developers S7]
+metadata: [Meet the Developer Series]
+---
+
+{{< resources/video-content >}}
diff --git a/content/resources/videos/Querying-Data-with-the-Python-Client-Library.md b/content/resources/videos/Querying-Data-with-the-Python-Client-Library.md
index b0f5274ca..c9bad51bf 100644
--- a/content/resources/videos/Querying-Data-with-the-Python-Client-Library.md
+++ b/content/resources/videos/Querying-Data-with-the-Python-Client-Library.md
@@ -1,7 +1,7 @@
 ---
 title: Querying Data with the Python Client Library
 description: >
-  If you're already a Python user, InfluxDB's Python client library lets you use a familiar language, to quickly get up-to-speed with InfluxDB. Here, Sunbrye Ly discusses the different options and settings for querying data using the Python client library.
+ If you're already a Python user, InfluxDB's Python client library lets you use a familiar language to quickly get up-to-speed with InfluxDB. Here, Sunbrye Ly discusses the different options and settings for querying data using the Python client library. menu: resources: parent: Videos diff --git a/content/resources/videos/Using-Pandas-Dataframes-in-InfluxDB-with-Python.md b/content/resources/videos/Using-Pandas-Dataframes-in-InfluxDB-with-Python.md new file mode 100644 index 000000000..a63de8c7c --- /dev/null +++ b/content/resources/videos/Using-Pandas-Dataframes-in-InfluxDB-with-Python.md @@ -0,0 +1,15 @@ +--- +title: Using Pandas Dataframes in InfluxDB with Python +description: > + A pandas dataframe might not be as cute and cuddly as a real panda, but for data scientists pandas dataframes are important data sources. Here, Zoe Steinkamp goes over some of the basics of using pandas dataframes with InfluxDB and the difference between pandas and Flux (InfluxDB's native query and analytics language). +menu: + resources: + parent: Videos +weight: 165 +youtubeID: cMkQXLCbFQY +date: 2022-9-12 +series: [Meet the Developers S7] +metadata: [Meet the Developer Series] +--- + +{{< resources/video-content >}} diff --git a/content/resources/videos/When-to-use-InfluxDB-Native-Collector-for-MQTT.md b/content/resources/videos/When-to-use-InfluxDB-Native-Collector-for-MQTT.md new file mode 100644 index 000000000..d963c0810 --- /dev/null +++ b/content/resources/videos/When-to-use-InfluxDB-Native-Collector-for-MQTT.md @@ -0,0 +1,15 @@ +--- +title: When to use InfluxDB Native Collector for MQTT +description: > + InfluxDB provides many ways to collect data. The newest one is the Native Collector feature. But when should you use the native collector and when should you use a different option, like Telegraf? Gary Fowler uses MQTT to show the different options and why you might choose one over the other. +menu: + resources: + parent: Videos +weight: 161 +youtubeID: I583LSj0bgs +date: 2022-9-12 +series: [Meet the Developers S7] +metadata: [Meet the Developer Series] +--- + +{{< resources/video-content >}} diff --git a/content/resources/videos/_index.md b/content/resources/videos/_index.md index bef1cfcfa..5c226e946 100644 --- a/content/resources/videos/_index.md +++ b/content/resources/videos/_index.md @@ -28,4 +28,7 @@ menu: #### Season 6 {{< resources/video-cards series="Meet the Developers S6" >}} +#### Season 7 +{{< resources/video-cards series="Meet the Developers S7" >}} + For additional product and tutorial videos, please visit our [YouTube channel](https://www.youtube.com/channel/UCnrgOD6G0y0_rcubQuICpTQ). 
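The "When to use InfluxDB Native Collector for MQTT" page added above positions Telegraf as the main alternative for collecting MQTT data. For comparison only, and not part of this patch, a minimal Telegraf-based setup might look like the sketch below; it assumes the `mqtt_consumer` input plugin's common options, and the broker URL and topic are placeholder values.

```toml
# Hypothetical telegraf.conf fragment for the Telegraf alternative:
# subscribe to an MQTT broker and parse incoming messages.
[[inputs.mqtt_consumer]]
  ## Assumed example broker address -- replace with your own.
  servers = ["tcp://broker.example.com:1883"]
  ## Assumed example topic subscription.
  topics = ["sensors/#"]
  ## Parse messages as InfluxDB line protocol.
  data_format = "influx"
```

As the video descriptions above put it, the native collector instead moves this collection step into InfluxDB Cloud itself as a direct, cloud-to-cloud connection, with no agent to run or configure.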
From d3d152230e6bb66c6f7503cdc7f6f9a93621b5b2 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Tue, 20 Sep 2022 16:43:54 -0600 Subject: [PATCH 07/17] hotfix: remove duplicate video file, fix table styles --- assets/styles/layouts/article/_tables.scss | 2 +- ...ring Replications For Downsampling To Cloud.md | 15 --------------- 2 files changed, 1 insertion(+), 16 deletions(-) delete mode 100644 content/resources/videos/Configuring Replications For Downsampling To Cloud.md diff --git a/assets/styles/layouts/article/_tables.scss b/assets/styles/layouts/article/_tables.scss index f3c05f382..258064cd2 100644 --- a/assets/styles/layouts/article/_tables.scss +++ b/assets/styles/layouts/article/_tables.scss @@ -2,7 +2,7 @@ table { display: inline-block; - margin: 1rem 0 3rem; + margin: 1rem 1rem 3rem 0; border-spacing: 0; color: $article-text; max-width: 100%; diff --git a/content/resources/videos/Configuring Replications For Downsampling To Cloud.md b/content/resources/videos/Configuring Replications For Downsampling To Cloud.md deleted file mode 100644 index 0914b2505..000000000 --- a/content/resources/videos/Configuring Replications For Downsampling To Cloud.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Configuring Replications For Downsampling To Cloud -description: > - Getting data from the edge to the cloud is often a critical process for IoT and IIoT processes. Here, Sam Dillard describes how to configure the Edge Data Replication (EDR) feature in InfluxDB, which automates the transfer of data from the edge to the cloud in a reliable, durable way. -menu: - resources: - parent: Videos -weight: 153 -youtubeID: yCComgh-B74 -date: 2022-6-30 -series: [Meet the Developers S6] -metadata: [Meet the Developer Series] ---- - -{{< resources/video-content >}} From eeaab0f240011ed68b5092b1a2415089cdd1b60a Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 21 Sep 2022 09:22:49 -0500 Subject: [PATCH 08/17] chore(api): Updates API specs for users and auths. Fixes whitespace, punctuation, grammar, links, and other style issues. (#4469) --- api-docs/cloud/ref.yml | 3977 +++++++++++----------------- api-docs/cloud/swaggerV1Compat.yml | 163 +- api-docs/v2.4/ref.yml | 3758 +++++++++----------------- api-docs/v2.4/swaggerV1Compat.yml | 163 +- 4 files changed, 2929 insertions(+), 5132 deletions(-) diff --git a/api-docs/cloud/ref.yml b/api-docs/cloud/ref.yml index d43efda90..f5fefc0ce 100644 --- a/api-docs/cloud/ref.yml +++ b/api-docs/cloud/ref.yml @@ -1,9 +1,8 @@ components: parameters: After: - description: > - Resource ID to seek from. Results are not inclusive of this ID. Use - `after` instead of `offset`. + description: | + Resource ID to seek from. Results are not inclusive of this ID. Use `after` instead of `offset`. in: query name: after required: false @@ -75,9 +74,7 @@ components: readOnly: true type: string message: - description: >- - A human-readable message that may contain detail about the - error. + description: A human-readable message that may contain detail about the error. readOnly: true type: string description: | @@ -91,25 +88,19 @@ components: application/json: examples: orgProvidedNotFound: - summary: >- - The org or orgID passed doesn't own the token passed in the - header + summary: The org or orgID passed doesn't own the token passed in the header value: code: invalid message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. 
- #### InfluxDB OSS - - - Returns this error if an incorrect value is passed for `org` or - `orgID`. + - Returns this error if an incorrect value is passed for `org` or `orgID`. GeneralServerError: content: application/json: @@ -145,21 +136,14 @@ components: message: organization not found schema: $ref: '#/components/schemas/Error' - description: > + description: | Not found. - A requested resource was not found. - - The response body contains the requested resource type and the name - value - + The response body contains the requested resource type and the name value (if you passed it)--for example: - - `"organization name \"my-org\" not found"` - - - `"organization not found"`: indicates you passed an ID that did not - match + - `"organization not found"`: indicates you passed an ID that did not match an organization. ServerError: content: @@ -223,6 +207,7 @@ components: readOnly: true type: string id: + description: The authorization ID. readOnly: true type: string links: @@ -239,15 +224,19 @@ components: readOnly: true type: object org: - description: The name of the organization that owns the token. + description: | + The organization name. + Specifies the [organization](/influxdb/cloud/reference/glossary/#organization) that the token is scoped to. readOnly: true type: string orgID: - description: The ID of the organization. + description: | + The organization ID. + Specifies the [organization](/influxdb/cloud/reference/glossary/#organization) that the authorization is scoped to. type: string permissions: description: | - A list of permissions for an authorization. + The list of permissions. An authorization must have at least one permission. items: $ref: '#/components/schemas/Permission' @@ -255,7 +244,11 @@ components: type: array token: description: | - The API token for authenticating InfluxDB API and CLI requests. + The API token. + [Tokens](/influxdb/cloud/reference/glossary/#token) are + used to authenticate InfluxDB API requests and `influx` CLI commands. + If authenticated, the token is allowed the permissions of the _authorization_. + The token value is unique to the authorization. readOnly: true type: string updatedAt: @@ -263,11 +256,15 @@ components: readOnly: true type: string user: - description: The name of the user that the token is scoped to. + description: | + The user name. + Specifies the [user](/influxdb/cloud/reference/glossary/#user) that owns the authorization. + If the authorization is _scoped_ to a user, the user; + otherwise, the creator of the authorization. readOnly: true type: string userID: - description: The ID of the user that the token is scoped to. + description: The user ID. Specifies the [user](/influxdb/cloud/reference/glossary/#user) that owns the authorization. If _scoped_, the user that the authorization is scoped to; otherwise, the creator of the authorization. readOnly: true type: string type: object @@ -281,9 +278,7 @@ components: type: string status: default: active - description: >- - Status of the token. If `inactive`, requests using the token will be - rejected. + description: Status of the token. If `inactive`, requests using the token will be rejected. enum: - active - inactive @@ -320,9 +315,7 @@ components: - '10' type: string bounds: - description: >- - The extents of the axis in the form [lower, upper]. Clients - determine whether bounds are inclusive or exclusive of their limits. + description: The extents of the axis in the form [lower, upper]. Clients determine whether bounds are inclusive or exclusive of their limits. 
items: type: string maxItems: 2 @@ -347,9 +340,7 @@ components: - linear type: string BadStatement: - description: >- - A placeholder for statements for which no correct statement nodes can be - created + description: A placeholder for statements for which no correct statement nodes can be created properties: text: description: Raw source text @@ -711,10 +702,7 @@ components: readOnly: true type: string latestCompleted: - description: >- - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) - of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -851,10 +839,7 @@ components: ColorMapping: additionalProperties: type: string - description: >- - A color mapping is an object that maps time series data to a UI color - scheme to allow the UI to render graphs consistent colors across - reloads. + description: A color mapping is an object that maps time series data to a UI color scheme to allow the UI to render graphs consistent colors across reloads. example: configcat_deployments-autopromotionblocker: '#663cd0' measurement_birdmigration_europe: '#663cd0' @@ -877,9 +862,7 @@ components: nullable: false type: string ConditionalExpression: - description: >- - Selects one of two expressions, `Alternate` or `Consequent`, depending - on a third boolean expression, `Test` + description: Selects one of two expressions, `Alternate` or `Consequent`, depending on a third boolean expression, `Test` properties: alternate: $ref: '#/components/schemas/Expression' @@ -954,9 +937,7 @@ components: description: InfluxDB v1 database type: string default: - description: >- - Mapping represents the default retention policy for the database - specified. + description: Mapping represents the default retention policy for the database specified. type: boolean id: description: The ID of the DBRP mapping. @@ -971,9 +952,7 @@ components: description: InfluxDB v1 retention policy type: string virtual: - description: >- - Indicates an autogenerated, virtual mapping based on the bucket - name. Currently only available in OSS. + description: Indicates an autogenerated, virtual mapping based on the bucket name. Currently only available in OSS. type: boolean required: - id @@ -992,9 +971,7 @@ components: description: InfluxDB v1 database type: string default: - description: >- - Mapping represents the default retention policy for the database - specified. + description: Mapping represents the default retention policy for the database specified. type: boolean org: description: The name of the organization that owns this mapping. @@ -1174,10 +1151,7 @@ components: $ref: '#/components/schemas/Links' type: object DateTimeLiteral: - description: >- - Represents an instant in time with nanosecond precision in [RFC3339Nano - date/time - format](/influxdb/cloud/reference/glossary/#rfc3339nano-timestamp). + description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb/cloud/reference/glossary/#rfc3339nano-timestamp). properties: type: $ref: '#/components/schemas/NodeType' @@ -1201,9 +1175,7 @@ components: description: If only zero values reported since time, trigger an alert type: boolean staleTime: - description: >- - String duration for time that a series is considered stale and - should not trigger deadman. 
+ description: String duration for time that a series is considered stale and should not trigger deadman. type: string statusMessageTemplate: description: The template used to generate and write a status message. @@ -1229,9 +1201,7 @@ components: - type type: object DecimalPlaces: - description: >- - Indicates whether decimal places should be enforced, and how many digits - it should show. + description: Indicates whether decimal places should be enforced, and how many digits it should show. properties: digits: description: The number of digits after decimal to display @@ -1245,24 +1215,19 @@ components: description: The delete predicate request. properties: predicate: - description: > - An expression in [delete predicate - syntax](https://docs.influxdata.com/influxdb/cloud/reference/syntax/delete-predicate/). + description: | + An expression in [delete predicate syntax](/influxdb/cloud/reference/syntax/delete-predicate/). example: tag1="value1" and (tag2="value2" and tag3!="value3") type: string start: - description: > - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). - + description: | + A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). The earliest time to delete from. format: date-time type: string stop: - description: > - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). - + description: | + A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). The latest time to delete from. format: date-time type: string @@ -1271,39 +1236,24 @@ components: - stop type: object Dialect: - description: > + description: | Options for tabular data output. - - Default output is [annotated - CSV](/influxdb/cloud/reference/syntax/annotated-csv/#csv-response-format) - with headers. - + Default output is [annotated CSV](/influxdb/cloud/reference/syntax/annotated-csv/#csv-response-format) with headers. For more information about tabular data **dialect**, - - see [W3 metadata vocabulary for tabular - data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). + see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). properties: annotations: - description: > + description: | Annotation rows to include in the results. - - An _annotation_ is metadata associated with an object (column) in - the data model. - + An _annotation_ is metadata associated with an object (column) in the data model. #### Related guides - - - See [Annotated CSV - annotations](https://docs.influxdata.com/influxdb/cloud/reference/syntax/annotated-csv/#annotations) - for examples and more information. - + - See [Annotated CSV annotations](/influxdb/cloud/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, - - see [W3 metadata vocabulary for tabular - data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). + see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). items: enum: - group @@ -1314,32 +1264,22 @@ components: uniqueItems: true commentPrefix: default: '#' - description: >- - The character prefixed to comment strings. Default is a number sign - (`#`). + description: The character prefixed to comment strings. 
Default is a number sign (`#`). maxLength: 1 minLength: 0 type: string dateTimeFormat: default: RFC3339 - description: > + description: | The format for timestamps in results. - - Default is [`RFC3339` date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp). - + Default is [`RFC3339` date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. - #### Example formatted date/time values - | Format | Value | - |:------------|:----------------------------| - | `RFC3339` | `"2006-01-02T15:04:05Z07:00"` | - | `RFC3339Nano` | `"2006-01-02T15:04:05.999999999Z07:00"` | enum: - RFC3339 @@ -1378,9 +1318,7 @@ components: $ref: '#/components/schemas/Expression' type: object Duration: - description: >- - A pair consisting of length of time and the unit of time measured. It is - the atomic unit from which all duration literals are composed. + description: A pair consisting of length of time and the unit of time measured. It is the atomic unit from which all duration literals are composed. properties: magnitude: type: integer @@ -1390,9 +1328,7 @@ components: type: string type: object DurationLiteral: - description: >- - Represents the elapsed time between two instants as an int64 nanosecond - count with syntax of golang's time.Duration + description: Represents the elapsed time between two instants as an int64 nanosecond count with syntax of golang's time.Duration properties: type: $ref: '#/components/schemas/NodeType' @@ -1424,9 +1360,7 @@ components: readOnly: true type: string err: - description: >- - Stack of errors that occurred during processing of the request. - Useful for debugging. + description: Stack of errors that occurred during processing of the request. Useful for debugging. readOnly: true type: string message: @@ -1434,9 +1368,7 @@ components: readOnly: true type: string op: - description: >- - Describes the logical code operation when the error occurred. Useful - for debugging. + description: Describes the logical code operation when the error occurred. Useful for debugging. readOnly: true type: string required: @@ -1485,9 +1417,7 @@ components: - $ref: '#/components/schemas/UnsignedIntegerLiteral' - $ref: '#/components/schemas/Identifier' ExpressionStatement: - description: >- - May consist of an expression that doesn't return a value and is executed - solely for its side-effects + description: May consist of an expression that doesn't return a value and is executed solely for its side-effects properties: expression: $ref: '#/components/schemas/Expression' @@ -1497,9 +1427,7 @@ components: Field: properties: alias: - description: >- - Alias overrides the field name in the returned response. Applies - only if type is `func` + description: Alias overrides the field name in the returned response. Applies only if type is `func` type: string args: description: Args are the arguments to the function @@ -1507,9 +1435,7 @@ components: $ref: '#/components/schemas/Field' type: array type: - description: >- - `type` describes the field type. `func` is a function. `field` is a - field reference. + description: '`type` describes the field type. `func` is a function. `field` is a field reference.' enum: - func - field @@ -1519,9 +1445,7 @@ components: - wildcard type: string value: - description: >- - value is the value of the field. Meaning of the value is implied by - the `type` key + description: value is the value of the field. 
Meaning of the value is implied by the `type` key type: string type: object File: @@ -1549,9 +1473,7 @@ components: additionalProperties: true type: object FloatLiteral: - description: >- - Represents floating point numbers according to the double - representations defined by the IEEE-754-1985 + description: Represents floating point numbers according to the double representations defined by the IEEE-754-1985 properties: type: $ref: '#/components/schemas/NodeType' @@ -1796,9 +1718,7 @@ components: type: array detectCoordinateFields: default: true - description: >- - If true, search results get automatically regroupped so that lon,lat - and value are treated as columns + description: If true, search results get automatically regroupped so that lon,lat and value are treated as columns type: boolean latLonColumns: $ref: '#/components/schemas/LatLonColumns' @@ -2146,11 +2066,8 @@ components: type: object InfluxqlCsvResponse: description: CSV Response to InfluxQL Query - example: > - name,tags,time,test_field,test_tag - test_measurement,,1603740794286107366,1,tag_value - test_measurement,,1603740870053205649,2,tag_value - test_measurement,,1603741221085428881,3,tag_value + example: | + name,tags,time,test_field,test_tag test_measurement,,1603740794286107366,1,tag_value test_measurement,,1603740870053205649,2,tag_value test_measurement,,1603741221085428881,3,tag_value type: string InfluxqlJsonResponse: description: JSON Response to InfluxQL Query @@ -2216,11 +2133,9 @@ components: properties: additionalProperties: type: string - description: > + description: | Key-value pairs associated with this label. - - To remove a property, send an update with an empty value (`""`) for - the key. + To remove a property, send an update with an empty value (`""`) for the key. example: color: ffb3b3 description: this is a description @@ -2235,12 +2150,10 @@ components: properties: additionalProperties: type: string - description: > + description: | Key-value pairs associated with this label. - - To remove a property, send an update with an empty value (`""`) for - the key. + To remove a property, send an update with an empty value (`""`) for the key. example: color: ffb3b3 description: this is a description @@ -2270,12 +2183,10 @@ components: type: string properties: additionalProperties: - description: > + description: | Key-value pairs associated with this label. - - To remove a property, send an update with an empty value (`""`) - for the key. + To remove a property, send an update with an empty value (`""`) for the key. type: string example: color: ffb3b3 @@ -2335,10 +2246,8 @@ components: description: The ID of the organization that the authorization is scoped to. type: string permissions: - description: > - A list of permissions that provide `read` and `write` access to - organization resources. - + description: | + A list of permissions that provide `read` and `write` access to organization resources. An authorization must contain at least one permission. items: $ref: '#/components/schemas/Permission' @@ -2592,9 +2501,7 @@ components: readOnly: true type: string err: - description: >- - Stack of errors that occurred during processing of the request. - Useful for debugging. + description: Stack of errors that occurred during processing of the request. Useful for debugging. readOnly: true type: string line: @@ -2607,9 +2514,7 @@ components: readOnly: true type: string op: - description: >- - Describes the logical code operation when the error occurred. Useful - for debugging. 
+ description: Describes the logical code operation when the error occurred. Useful for debugging. readOnly: true type: string required: @@ -2659,19 +2564,14 @@ components: readOnly: true type: string time: - description: >- - The time ([RFC3339Nano date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339nano-timestamp)) - that the event occurred. + description: The time ([RFC3339Nano date/time format](/influxdb/cloud/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true type: string type: object LogicalExpression: - description: >- - Represents the rule conditions that collectively evaluate to either true - or false + description: Represents the rule conditions that collectively evaluate to either true or false properties: left: $ref: '#/components/schemas/Expression' @@ -3147,22 +3047,15 @@ components: readOnly: true type: string latestCompleted: - description: >- - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) - of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string limit: - description: >- - Don't notify me more than times every seconds. - If set, limitEvery cannot be empty. + description: Don't notify me more than times every seconds. If set, limitEvery cannot be empty. type: integer limitEvery: - description: >- - Don't notify me more than times every seconds. - If set, limit cannot be empty. + description: Don't notify me more than times every seconds. If set, limit cannot be empty. type: integer links: example: @@ -3494,31 +3387,21 @@ components: minimum: 0 type: integer shardGroupDurationSeconds: - description: > - The [shard group - duration](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#shard). - + description: | + The [shard group duration](/influxdb/cloud/reference/glossary/#shard). The number of seconds that each shard group covers. - #### InfluxDB Cloud - - Doesn't use `shardGroupDurationsSeconds`. - #### InfluxDB OSS - - - Default value depends on the [bucket retention - period](https://docs.influxdata.com/influxdb/cloud/reference/internals/shards/#shard-group-duration). - + - Default value depends on the [bucket retention period](/influxdb/cloud/reference/internals/shards/#shard-group-duration). #### Related guides - - - InfluxDB [shards and shard - groups](https://docs.influxdata.com/influxdb/cloud/reference/internals/shards/) + - InfluxDB [shards and shard groups](/influxdb/cloud/reference/internals/shards/) format: int64 type: integer type: @@ -3545,11 +3428,9 @@ components: $ref: '#/components/schemas/Resource' properties: id: - description: > + description: | The ID of a specific resource. - - In a `permission`, applies the permission to only the resource - with this ID. + In a `permission`, applies the permission to only the resource with this ID. type: string name: description: | @@ -3561,18 +3442,14 @@ components: Optional: The name of the organization with `orgID`. type: string orgID: - description: > + description: | The ID of the organization that owns the resource. - - In a `permission`, applies the permission to all resources of - `type` owned by this organization. 
+ In a `permission`, applies the permission to all resources of `type` owned by this organization. type: string type: - description: > + description: | The type of resource. - - In a `permission`, applies the permission to all resources of - this type. + In a `permission`, applies the permission to all resources of this type. enum: - authorizations - buckets @@ -3615,9 +3492,7 @@ components: $ref: '#/components/schemas/NodeType' type: object PipeLiteral: - description: >- - Represents a specialized literal value, indicating the left hand value - of a pipe expression + description: Represents a specialized literal value, indicating the left hand value of a pipe expression properties: type: $ref: '#/components/schemas/NodeType' @@ -3641,41 +3516,27 @@ components: $ref: '#/components/schemas/RetentionRules' rp: default: '0' - description: > - Retention policy is an InfluxDB 1.x concept that represents the - duration - - of time that each data point in the retention policy persists. Use - `rp` - + description: | + Retention policy is an InfluxDB 1.x concept that represents the duration + of time that each data point in the retention policy persists. Use `rp` for compatibility with InfluxDB 1.x. - The InfluxDB 2.x and Cloud equivalent is - - [retention - period](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#retention-period). + [retention period](/influxdb/cloud/reference/glossary/#retention-period). type: string schemaType: $ref: '#/components/schemas/SchemaType' default: implicit - description: > + description: | Schema Type. - - Use `explicit` to enforce column names, tags, fields, and data types - for - + Use `explicit` to enforce column names, tags, fields, and data types for your data. - #### InfluxDB Cloud - - Default is `implicit`. - #### InfluxDB OSS - - Doesn't support `schemaType`. required: - orgID @@ -3731,22 +3592,18 @@ components: type: string params: additionalProperties: true - description: > + description: | Key-value pairs passed as parameters during query execution. - - To use parameters in your query, pass a _`query`_ with `params` - references (in dot notation)--for example: - + To use parameters in your query, pass a _`query`_ with `params` references (in dot notation)--for example: ```json - query: "from(bucket: params.mybucket) |> range(start: params.rangeStart) |> limit(n:1)" + query: "from(bucket: params.mybucket)\ + |> range(start: params.rangeStart) |> limit(n:1)" ``` - and pass _`params`_ with the key-value pairs--for example: - ```json params: { "mybucket": "environment", @@ -3754,14 +3611,10 @@ components: } ``` - - During query execution, InfluxDB passes _`params`_ to your script - and substitutes the values. - + During query execution, InfluxDB passes _`params`_ to your script and substitutes the values. #### Limitations - - If you use _`params`_, you can't use _`extern`_. type: object query: @@ -3830,9 +3683,7 @@ components: type: string type: object RegexpLiteral: - description: >- - Expressions begin and end with `/` and are regular expressions with - syntax accepted by RE2 + description: Expressions begin and end with `/` and are regular expressions with syntax accepted by RE2 properties: type: $ref: '#/components/schemas/NodeType' @@ -3856,9 +3707,7 @@ components: Resource: properties: id: - description: >- - If ID is set that is a permission for a specific resource. if it is - not set it is a permission for all resources of that resource type. + description: If ID is set that is a permission for a specific resource. 
If it is not set, it is a permission for all resources of that resource type.
 nullable: true
 type: string
 name:
@@ -3870,10 +3719,7 @@ components:
 nullable: true
 type: string
 orgID:
- description: >-
- If orgID is set that is a permission for all resources owned my that
- org. if it is not set it is a permission for all resources of that
- resource type.
+ description: If orgID is set, that is a permission for all resources owned by that org. If it is not set, it is a permission for all resources of that resource type.
 nullable: true
 type: string
 type:
@@ -3951,37 +3797,27 @@ components:
 properties:
 everySeconds:
 default: 2592000
- description: >
- The duration in seconds for how long data will be kept in the
- database.
-
+ description: |
+ The duration in seconds for how long data will be kept in the database.
 The default duration is 2592000 (30 days).
-
 0 represents infinite retention.
 example: 86400
 format: int64
 minimum: 0
 type: integer
 shardGroupDurationSeconds:
- description: >
+ description: |
 The shard group duration.
-
 The duration or interval (in seconds) that each shard group covers.
-
 #### InfluxDB Cloud
-
 - Does not use `shardGroupDurationsSeconds`.
-
 #### InfluxDB OSS
-
 - Default value depends on the
-
- [bucket retention
- period](https://docs.influxdata.com/influxdb/cloud/v2.3/reference/internals/shards/#shard-group-duration).
+ [bucket retention period](/influxdb/cloud/v2.3/reference/internals/shards/#shard-group-duration).
 format: int64
 type: integer
 type:
@@ -4106,10 +3942,7 @@ components:
 Run:
 properties:
 finishedAt:
- description: >-
- The time ([RFC3339Nano date/time
- format](https://go.dev/src/time/format.go)) the run finished
- executing.
+ description: The time ([RFC3339Nano date/time format](https://go.dev/src/time/format.go)) the run finished executing.
 example: 2006-01-02T15:04:05.999999999Z07:00
 format: date-time
 readOnly: true
 type: string
@@ -4145,26 +3978,17 @@ components:
 readOnly: true
 type: array
 requestedAt:
- description: >-
- The time ([RFC3339Nano date/time
- format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339nano-timestamp))
- the run was manually requested.
+ description: The time ([RFC3339Nano date/time format](/influxdb/cloud/reference/glossary/#rfc3339nano-timestamp)) the run was manually requested.
 example: 2006-01-02T15:04:05.999999999Z07:00
 format: date-time
 readOnly: true
 type: string
 scheduledFor:
- description: >-
- The time [RFC3339 date/time
- format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)
- used for the run's `now` option.
+ description: The time [RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp) used for the run's `now` option.
 format: date-time
 type: string
 startedAt:
- description: >-
- The time ([RFC3339Nano date/time
- format](https://go.dev/src/time/format.go)) the run started
- executing.
+ description: The time ([RFC3339Nano date/time format](https://go.dev/src/time/format.go)) the run started executing.
 example: 2006-01-02T15:04:05.999999999Z07:00
 format: date-time
 readOnly: true
 type: string
@@ -4184,12 +4008,9 @@ components:
 RunManually:
 properties:
 scheduledFor:
- description: >
- The time [RFC3339 date/time
- format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)
-
+ description: |
+ The time [RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)
 used for the run's `now` option.
-
 Default is the server _now_ time.
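+
+ #### Example
+
+ A minimal sketch of passing `scheduledFor` when manually starting a run,
+ assuming the `POST /api/v2/tasks/TASK_ID/runs` endpoint that accepts this
+ request body (the host, `TASK_ID`, token, and timestamp are placeholder values):
+
+ ```sh
+ # TASK_ID and INFLUX_API_TOKEN are placeholders; replace them with your own values.
+ curl --request POST "http://localhost:8086/api/v2/tasks/TASK_ID/runs" \
+   --header "Authorization: Token INFLUX_API_TOKEN" \
+   --header "Content-Type: application/json" \
+   --data '{"scheduledFor": "2022-09-15T14:00:00Z"}'
+ ```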
format: date-time nullable: true @@ -4367,14 +4188,14 @@ components: orgID: type: string script: - description: script to be executed + description: The script to execute. type: string updatedAt: format: date-time readOnly: true type: string url: - description: invocation endpoint address + description: The invocation endpoint address. type: string required: - name @@ -4409,17 +4230,11 @@ components: properties: params: additionalProperties: true - description: > + description: | The script parameters. - - `params` contains key-value pairs that map values to the - **params.keys** - + `params` contains key-value pairs that map values to the **params.keys** in a script. - - When you invoke a script with `params`, InfluxDB passes the values - as - + When you invoke a script with `params`, InfluxDB passes the values as invocation parameters to the script. type: object type: object @@ -4555,9 +4370,7 @@ components: description: Specifies the API token string. Specify either `URL` or `Token`. type: string url: - description: >- - Specifies the URL of the Slack endpoint. Specify either `URL` or - `Token`. + description: Specifies the URL of the Slack endpoint. Specify either `URL` or `Token`. type: string type: object type: object @@ -4700,9 +4513,7 @@ components: decimalPlaces: $ref: '#/components/schemas/DecimalPlaces' fieldOptions: - description: >- - fieldOptions represent the fields retrieved by the query with - customization options + description: fieldOptions represent the fields retrieved by the query with customization options items: $ref: '#/components/schemas/RenamableField' type: array @@ -4722,21 +4533,15 @@ components: tableOptions: properties: fixFirstColumn: - description: >- - fixFirstColumn indicates whether the first column of the table - should be locked + description: fixFirstColumn indicates whether the first column of the table should be locked type: boolean sortBy: $ref: '#/components/schemas/RenamableField' verticalTimeAxis: - description: >- - verticalTimeAxis describes the orientation of the table by - indicating whether the time axis will be displayed vertically + description: verticalTimeAxis describes the orientation of the table by indicating whether the time axis will be displayed vertically type: boolean wrapping: - description: >- - Wrapping describes the text wrapping style to be used in table - views + description: Wrapping describes the text wrapping style to be used in table views enum: - truncate - wrap @@ -4744,9 +4549,7 @@ components: type: string type: object timeFormat: - description: >- - timeFormat describes the display format for time values according to - moment.js date formatting + description: timeFormat describes the display format for time values according to moment.js date formatting type: string type: enum: @@ -4781,41 +4584,35 @@ components: Task: properties: authorizationID: - description: >- - The ID of the authorization used when the task communicates with the - query engine. + description: | + An authorization ID. + Specifies the authorization used when the task communicates with the query engine. + + To find an authorization ID, use the + [`GET /api/v2/authorizations` endpoint](#operation/GetAuthorizations) to + list authorizations. type: string createdAt: format: date-time readOnly: true type: string cron: - description: >- - A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) - that defines the schedule on which the task runs. InfluxDB uses the - system time when evaluating Cron expressions. 
+ description: A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that defines the schedule on which the task runs. InfluxDB uses the system time when evaluating Cron expressions. type: string description: - description: The description of the task. + description: A description of the task. type: string every: - description: >- - The interval ([duration - literal](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) - at which the task runs. `every` also determines when the task first - runs, depending on the specified time. + description: The interval ([duration literal](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) at which the task runs. `every` also determines when the task first runs, depending on the specified time. format: duration type: string flux: - description: > - The Flux script that the task runs. - + description: | + The Flux script that the task executes. #### Limitations - - - - If you use the `flux` property, you can't use the `scriptID` and - `scriptParameters` properties. + - If you use the `flux` property, you can't use the `scriptID` and `scriptParameters` properties. + format: flux type: string id: readOnly: true @@ -4833,10 +4630,7 @@ components: readOnly: true type: string latestCompleted: - description: >- - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) - of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -4867,46 +4661,52 @@ components: description: The name of the task. type: string offset: - description: >- - A - [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals) - to delay execution of the task after the scheduled time has elapsed. - `0` removes the offset. + description: A [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals) to delay execution of the task after the scheduled time has elapsed. `0` removes the offset. format: duration type: string org: - description: The name of the organization that owns the task. + description: | + An [organization](/influxdb/cloud/reference/glossary/#organization) name. + Specifies the organization that owns the task. type: string orgID: - description: The ID of the organization that owns the task. + description: | + An [organization](/influxdb/cloud/reference/glossary/#organization) ID. + Specifies the organization that owns the task. type: string ownerID: - description: The ID of the user who owns the task. + description: | + A [user](/influxdb/cloud/reference/glossary/#user) ID. + Specifies the owner of the task. + + To find a user ID, you can use the + [`GET /api/v2/users` endpoint](#operation/GetUsers) to + list users. type: string scriptID: - description: > - The ID of the script that the task runs. - + description: | + A script ID. + Specifies the [invokable script](#tag/Invokable-Scripts) that the task executes. #### Limitations + - If you use the `scriptID` property, you can't use the `flux` property. - - If you use the `scriptID` property, you can't use the `flux` - property. 
+ #### Related guides
+
+ - [Create a task that references a script](/influxdb/cloud/process-data/manage-tasks/create-task/#create-a-task-that-references-a-script)
 type: string
 scriptParameters:
- description: >
- The parameter key-value pairs passed to the script (referenced by
- `scriptID`) during the task run.
-
+ description: |
+ Key-value pairs for `params` in the script.
+ Defines the invocation parameter values passed to the script specified by `scriptID`.
+ When running the task, InfluxDB executes the script with the parameters
+ you provide.
 #### Limitations
-
- - `scriptParameters` requires `scriptID`.
-
- - If you use the `scriptID` and `scriptParameters` properties, you
- can't use the `flux` property.
+ - To use `scriptParameters`, you must provide a `scriptID`.
+ - If you use the `scriptID` and `scriptParameters` properties, you can't use the `flux` property.
 type: object
 status:
 $ref: '#/components/schemas/TaskStatusType'
@@ -4922,43 +4722,29 @@ components:
 TaskCreateRequest:
 properties:
 cron:
- description: >-
- A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview)
- that defines the schedule on which the task runs. InfluxDB bases
- cron runs on the system time.
+ description: A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that defines the schedule on which the task runs. InfluxDB bases cron runs on the system time.
 type: string
 description:
 description: The description of the task.
 type: string
 every:
- description: >
- The interval ([duration
- literal](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals)))
- at which the task runs.
-
- `every` also determines when the task first runs, depending on the
- specified time.
+ description: |
+ The interval ([duration literal](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals)) at which the task runs.
+ `every` also determines when the task first runs, depending on the specified time.
 type: string
 flux:
- description: >
+ description: |
 The Flux script that the task runs.
-
 #### Limitations
-
- - If you use the `flux` property, you can't use the `scriptID` and
- `scriptParameters` properties.
+ - If you use the `flux` property, you can't use the `scriptID` and `scriptParameters` properties.
 type: string
 name:
 description: The name of the task
 type: string
 offset:
- description: >-
- A
- [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals)
- to delay execution of the task after the scheduled time has elapsed.
- `0` removes the offset.
+ description: A [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals) to delay execution of the task after the scheduled time has elapsed. `0` removes the offset.
 format: duration
 type: string
 org:
@@ -4968,29 +4754,21 @@ components:
 description: The name of the organization that owns the task.
 type: string
 orgID:
 description: The ID of the organization that owns the task.
 type: string
 scriptID:
- description: >
+ description: |
 The ID of the script that the task runs.
-
 #### Limitations
-
- - If you use the `scriptID` property, you can't use the `flux`
- property.
+ - If you use the `scriptID` property, you can't use the `flux` property.
 type: string
 scriptParameters:
- description: >
- The parameter key-value pairs passed to the script (referenced by
- `scriptID`) during the task run.
-
+ description: |
+ The parameter key-value pairs passed to the script (referenced by `scriptID`) during the task run.
 #### Limitations
-
 - `scriptParameters` requires `scriptID`.
- - - If you use the `scriptID` and `scriptParameters` properties, you - can't use the `flux` property. + - If you use the `scriptID` and `scriptParameters` properties, you can't use the `flux` property. type: object status: $ref: '#/components/schemas/TaskStatusType' @@ -5155,14 +4933,10 @@ components: - $ref: '#/components/schemas/NotificationEndpointBase' - properties: channel: - description: >- - The ID of the telegram channel; a chat_id in - https://core.telegram.org/bots/api#sendmessage . + description: The ID of the telegram channel; a chat_id in https://core.telegram.org/bots/api#sendmessage . type: string token: - description: >- - Specifies the Telegram bot token. See - https://core.telegram.org/bots#creating-a-new-bot . + description: Specifies the Telegram bot token. See https://core.telegram.org/bots#creating-a-new-bot . type: string required: - token @@ -5176,27 +4950,20 @@ components: TelegramNotificationRuleBase: properties: disableWebPagePreview: - description: >- - Disables preview of web links in the sent messages when "true". - Defaults to "false". + description: Disables preview of web links in the sent messages when "true". Defaults to "false". type: boolean messageTemplate: description: The message template as a flux interpolated string. type: string parseMode: - description: >- - Parse mode of the message text per - https://core.telegram.org/bots/api#formatting-options. Defaults to - "MarkdownV2". + description: Parse mode of the message text per https://core.telegram.org/bots/api#formatting-options. Defaults to "MarkdownV2". enum: - MarkdownV2 - HTML - Markdown type: string type: - description: >- - The discriminator between other types of notification rules is - "telegram". + description: The discriminator between other types of notification rules is "telegram". enum: - telegram type: string @@ -5217,72 +4984,58 @@ components: kind: $ref: '#/components/schemas/TemplateKind' metadata: - description: > - Metadata properties used for the resource when the template is - applied. + description: | + Metadata properties used for the resource when the template is applied. properties: name: type: string type: object spec: - description: > - Configuration properties used for the resource when the template - is applied. - + description: | + Configuration properties used for the resource when the template is applied. Key-value pairs map to the specification for the resource. - - The following code samples show `spec` configurations for template - resources: - + The following code samples show `spec` configurations for template resources: - A bucket: - ```json - { "spec": { - "name": "iot_center", - "retentionRules": [{ - "everySeconds": 2.592e+06, - "type": "expire" - }] - } + ```json + { "spec": { + "name": "iot_center", + "retentionRules": [{ + "everySeconds": 2.592e+06, + "type": "expire" + }] } - ``` + } + ``` - A variable: - ```json - { "spec": { - "language": "flux", - "name": "Node_Service", - "query": "import \"influxdata/influxdb/v1\"\r\nv1.tagValues(bucket: \"iot_center\", - tag: \"service\")", - "type": "query" - } + ```json + { "spec": { + "language": "flux", + "name": "Node_Service", + "query": "import \"influxdata/influxdb/v1\"\r\nv1.tagValues(bucket: \"iot_center\", + tag: \"service\")", + "type": "query" } - ``` + } + ``` type: object type: object type: array TemplateApply: properties: actions: - description: > + description: | A list of `action` objects. + Actions let you customize how InfluxDB applies templates in the request. 
- Actions let you customize how InfluxDB applies templates in the - request. + You can use the following actions to prevent creating or updating resources: - - You can use the following actions to prevent creating or updating - resources: - - - - A `skipKind` action skips template resources of a specified - `kind`. - - - A `skipResource` action skips template resources with a specified - `metadata.name` + - A `skipKind` action skips template resources of a specified `kind`. + - A `skipResource` action skips template resources with a specified `metadata.name` and `kind`. items: oneOf: @@ -5317,14 +5070,11 @@ components: type: object type: array dryRun: - description: > + description: | Only applies a dry run of the templates passed in the request. - - Validates the template and generates a resource diff and summary. - - - Doesn't install templates or make changes to the InfluxDB - instance. + - Doesn't install templates or make changes to the InfluxDB instance. type: boolean envRefs: additionalProperties: @@ -5333,30 +5083,17 @@ components: - type: integer - type: number - type: boolean - description: > - An object with key-value pairs that map to **environment - references** in templates. - - - Environment references in templates are `envRef` objects with an - `envRef.key` + description: | + An object with key-value pairs that map to **environment references** in templates. + Environment references in templates are `envRef` objects with an `envRef.key` property. - - To substitute a custom environment reference value when applying - templates, - + To substitute a custom environment reference value when applying templates, pass `envRefs` with the `envRef.key` and the value. - - When you apply a template, InfluxDB replaces `envRef` objects in the - template - + When you apply a template, InfluxDB replaces `envRef` objects in the template with the values that you provide in the `envRefs` parameter. - - For more examples, see how to [define environment - references](https://docs.influxdata.com/influxdb/cloud/influxdb-templates/use/#define-environment-references). - + For more examples, see how to [define environment references](/influxdb/cloud/influxdb-templates/use/#define-environment-references). The following template fields may use environment references: @@ -5364,25 +5101,17 @@ components: - `spec.endpointName` - `spec.associations.name` - For more information about including environment references in - template fields, see how to - - [include user-definable resource - names](https://docs.influxdata.com/influxdb/cloud/influxdb-templates/create/#include-user-definable-resource-names). + For more information about including environment references in template fields, see how to + [include user-definable resource names](/influxdb/cloud/influxdb-templates/create/#include-user-definable-resource-names). type: object orgID: - description: > + description: | Organization ID. - InfluxDB applies templates to this organization. - The organization owns all resources created by the template. - To find your organization, see how to - - [view - organizations](https://docs.influxdata.com/influxdb/cloud/organizations/view-orgs/). + [view organizations](/influxdb/cloud/organizations/view-orgs/). type: string remotes: description: | @@ -5403,99 +5132,72 @@ components: secrets: additionalProperties: type: string - description: > + description: | An object with key-value pairs that map to **secrets** in queries. 
- Queries may reference secrets stored in InfluxDB--for example, - - the following Flux script retrieves `POSTGRES_USERNAME` and - `POSTGRES_PASSWORD` - + the following Flux script retrieves `POSTGRES_USERNAME` and `POSTGRES_PASSWORD` secrets and then uses them to connect to a PostgreSQL database: - ```js - import "sql" - import "influxdata/influxdb/secrets" + ```js + import "sql" + import "influxdata/influxdb/secrets" - username = secrets.get(key: "POSTGRES_USERNAME") - password = secrets.get(key: "POSTGRES_PASSWORD") + username = secrets.get(key: "POSTGRES_USERNAME") + password = secrets.get(key: "POSTGRES_PASSWORD") - sql.from( - driverName: "postgres", - dataSourceName: "postgresql://${username}:${password}@localhost:5432", - query: "SELECT * FROM example_table", - ) - ``` + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://${username}:${password}@localhost:5432", + query: "SELECT * FROM example_table", + ) + ``` To define secret values in your `/api/v2/templates/apply` request, - pass the `secrets` parameter with key-value pairs--for example: - ```json - { - ... - "secrets": { - "POSTGRES_USERNAME": "pguser", - "POSTGRES_PASSWORD": "foo" - } - ... + ```json + { + ... + "secrets": { + "POSTGRES_USERNAME": "pguser", + "POSTGRES_PASSWORD": "foo" } - ``` - - InfluxDB stores the key-value pairs as secrets that you can access - with `secrets.get()`. + ... + } + ``` + InfluxDB stores the key-value pairs as secrets that you can access with `secrets.get()`. Once stored, you can't view secret values in InfluxDB. - #### Related guides - - - [How to pass secrets when installing a - template](https://docs.influxdata.com/influxdb/cloud/influxdb-templates/use/#pass-secrets-when-installing-a-template) + - [How to pass secrets when installing a template](/influxdb/cloud/influxdb-templates/use/#pass-secrets-when-installing-a-template) type: object stackID: - description: > + description: | ID of the stack to update. - - To apply templates to an existing stack in the organization, use the - `stackID` parameter. - + To apply templates to an existing stack in the organization, use the `stackID` parameter. If you apply templates without providing a stack ID, - InfluxDB initializes a new stack with all new resources. - - To find a stack ID, use the InfluxDB [`/api/v2/stacks` API - endpoint](#operation/ListStacks) to list stacks. - + To find a stack ID, use the InfluxDB [`/api/v2/stacks` API endpoint](#operation/ListStacks) to list stacks. #### Related guides - - - - [Stacks](https://docs.influxdata.com/influxdb/cloud/influxdb-templates/stacks/) - - - [View - stacks](https://docs.influxdata.com/influxdb/cloud/influxdb-templates/stacks/view/) + - [Stacks](/influxdb/cloud/influxdb-templates/stacks/) + - [View stacks](/influxdb/cloud/influxdb-templates/stacks/view/) type: string template: - description: > + description: | A template object to apply. - A template object has a `contents` property - with an array of InfluxDB resource configurations. - Pass `template` to apply only one template object. - If you use `template`, you can't use the `templates` parameter. - - If you want to apply multiple template objects, use `templates` - instead. + If you want to apply multiple template objects, use `templates` instead. 
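+
+ For example, the following sketch applies one inline template object that
+ creates a bucket. The host, token, org ID, and the resource `contents`
+ (including the `apiVersion` value) are illustrative placeholders, not a
+ definitive template:
+
+ ```sh
+ # INFLUX_ORG_ID and the bucket spec below are illustrative placeholders.
+ curl --request POST "http://localhost:8086/api/v2/templates/apply" \
+   --header "Authorization: Token INFLUX_API_TOKEN" \
+   --header "Content-Type: application/json" \
+   --data '{
+     "orgID": "INFLUX_ORG_ID",
+     "template": {
+       "contents": [{
+         "apiVersion": "influxdata.com/v2alpha1",
+         "kind": "Bucket",
+         "metadata": {"name": "example-bucket"},
+         "spec": {"retentionRules": [{"everySeconds": 86400, "type": "expire"}]}
+       }]
+     }
+   }'
+ ```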
properties: contentType: type: string @@ -5544,9 +5246,7 @@ components: items: properties: defaultValue: - description: >- - Default value that will be provided for the reference when no - value is provided + description: Default value that will be provided for the reference when no value is provided nullable: true oneOf: - type: string @@ -5554,9 +5254,7 @@ components: - type: number - type: boolean envRefKey: - description: >- - Key identified as environment reference and is the key identified - in the template + description: Key identified as environment reference and is the key identified in the template type: string resourceField: description: Field the environment reference corresponds too @@ -5602,10 +5300,7 @@ components: kind: $ref: '#/components/schemas/TemplateKind' name: - description: >- - if defined with id, name is used for resource exported by id. - if defined independently, resources strictly matching name are - exported + description: if defined with id, name is used for resource exported by id. if defined independently, resources strictly matching name are exported type: string required: - id @@ -6416,7 +6111,7 @@ components: properties: id: description: | - The ID of the user. + The user ID. readOnly: true type: string links: @@ -6430,13 +6125,13 @@ components: type: object name: description: | - The name of the user. + The user name. type: string status: default: active - description: > - The status of a user. An inactive user won't have access to - resources. + description: | + The status of a user. + An inactive user can't read or write resources. enum: - active - inactive @@ -6720,76 +6415,57 @@ components: type: object securitySchemes: BasicAuthentication: - description: > + description: | ### Basic authentication scheme + Use the HTTP Basic authentication scheme for InfluxDB `/api/v2` API operations that support it: - Use the HTTP Basic authentication scheme for InfluxDB `/api/v2` API - operations that support it: - - **username**: InfluxDB Cloud username - - **password**: InfluxDB Cloud API token + - **username**: InfluxDB Cloud username + - **password**: InfluxDB Cloud API token #### Example - - `curl --get "https://europe-west1-1.gcp.cloud2.influxdata.com/query" - --user "exampleuser@influxdata.com":"INFLUX_API_TOKEN"` + ```sh + curl --get "https://europe-west1-1.gcp.cloud2.influxdata.com/query" + --user "exampleuser@influxdata.com":"INFLUX_API_TOKEN" + ``` Replace the following: - - *`exampleuser@influxdata.com`*: the email address that you signed up - with - - - *`INFLUX_API_TOKEN`*: your [InfluxDB API - token](/influxdb/cloud/reference/glossary/#token) + - *`exampleuser@influxdata.com`*: the email address that you signed up with + - *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/cloud/reference/glossary/#token) scheme: basic type: http TokenAuthentication: - description: > - Use the [Token - authentication](#section/Authentication/TokenAuthentication) - + description: | + Use the [Token authentication](#section/Authentication/TokenAuthentication) scheme to authenticate to the InfluxDB API. - - In your API requests, send an `Authorization` header. - - For the header value, provide the word `Token` followed by a space and - an InfluxDB API token. - + For the header value, provide the word `Token` followed by a space and an InfluxDB API token. The word `Token` is case-sensitive. 
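+
+ #### Example
+
+ A minimal sketch of an authenticated request (the host and token are
+ placeholder values):
+
+ ```sh
+ # INFLUX_API_TOKEN is a placeholder for your API token.
+ curl --request GET "http://localhost:8086/api/v2/buckets" \
+   --header "Authorization: Token INFLUX_API_TOKEN"
+ ```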
-
- ### Syntax
-
- `Authorization: Token YOUR_INFLUX_API_TOKEN`
-
-
+ `Authorization: Token INFLUX_API_TOKEN`
 For more information and examples, see the following:
-
- - [`/authorizations`](#tag/Authorizations) endpoint.
- - [Authorize API requests](/influxdb/cloud/api-guide/api_intro/#authentication).
- - [Manage API tokens](/influxdb/cloud/security/tokens/).
+
+ - [`/authorizations` endpoint](#tag/Authorizations)
+ - [Authorize API requests](/influxdb/cloud/api-guide/api_intro/#authentication)
+ - [Manage API tokens](/influxdb/cloud/security/tokens/)
 in: header
 name: Authorization
 type: apiKey
info:
 title: InfluxDB Cloud API Service
 version: Cloud 2.x
- description: >
- The InfluxDB v2 API provides a programmatic interface for all interactions
- with InfluxDB.
-
+ description: |
+ The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB.
 Access the InfluxDB API using the `/api/v2/` endpoint.
- This documentation is generated from the
-
- [InfluxDB OpenAPI
- specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml).
+ This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml).
openapi: 3.0.0
paths:
 /api/v2:
  get:
 - System information endpoints
 /api/v2/authorizations:
  get:
- description: >
+ description: |
 Retrieves a list of authorizations.
-
- To limit which authorizations are returned, pass query parameters in
- your request.
-
- If no query parameters are passed, InfluxDB returns all authorizations
- for the organization.
-
+ To limit which authorizations are returned, pass query parameters in your request.
+ If no query parameters are passed, InfluxDB returns all authorizations for the organization.
 #### InfluxDB Cloud
-
- - InfluxDB Cloud doesn't expose [API
- token](/influxdb/cloud/reference/glossary/#token)
+ - InfluxDB Cloud doesn't expose [API token](/influxdb/cloud/reference/glossary/#token)
 values in `GET /api/v2/authorizations` responses;
 returns `token: redacted` for all authorizations.
 #### Required permissions for InfluxDB Cloud
-
 - `read-authorizations`
-
 #### Related guides
-
- - [View tokens](/influxdb/cloud/security/tokens/view-tokens/).
+ - [View tokens](/influxdb/cloud/security/tokens/view-tokens/)
 operationId: GetAuthorizations
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
 - description: |
 A user ID.
- Only returns authorizations scoped to this user.
+ Only returns authorizations scoped to the specified
+ [user](/influxdb/cloud/reference/glossary/#user).
 in: query
 name: userID
 schema:
 type: string
 - description: |
 A user name.
- Only returns authorizations scoped to this user.
+ Only returns authorizations scoped to the specified
+ [user](/influxdb/cloud/reference/glossary/#user).
 in: query
 name: user
 schema:
 type: string
 - description: |
 An organization ID.
- Only returns authorizations that belong to this organization.
+ Only returns authorizations that belong to the specified
+ [organization](/influxdb/cloud/reference/glossary/#organization).
 in: query
 name: orgID
 schema:
 type: string
 - description: |
 An organization name.
- Only returns authorizations that belong to this organization.
+ Only returns authorizations that belong to the specified
+ [organization](/influxdb/cloud/reference/glossary/#organization).
 in: query
 name: org
 schema:
 type: string
- description: |
- A token value.
- Only returns the authorization that has this token.
+ An API [token](/influxdb/cloud/reference/glossary/#token) value. + Returns the authorization for the specified token. in: query name: token schema: @@ -6898,62 +6568,36 @@ paths: tags: - Authorizations post: - description: > - Creates an authorization. - - - Use this endpoint to create an authorization, which generates an API - token - - with permissions to `read` or `write` to a specific resource or `type` - of resource. - - The response contains the new authorization with the generated API - token. + description: | + Creates an authorization and returns the authorization with the + generated API [token](/influxdb/cloud/reference/glossary/#token). + Use this endpoint to create an authorization, which generates an API token + with permissions to `read` or `write` to a specific resource or `type` of resource. Keep the following in mind when creating and updating authorizations: - - - To apply a permission to a specific resource, specify the resource - `id` field. - - - To apply a permission to all resources with the type, omit the - resource `id`. - - - To scope an authorization to a specific user, provide the `userID` - property. - + - To apply a permission to a specific resource, specify the resource `id` field. + - To apply a permission to all resources with the type, omit the resource `id`. + - To scope an authorization to a specific user, provide the `userID` property. #### Limitations - To follow best practices for secure API token generation and retrieval, - InfluxDB Cloud enforces access restrictions on API tokens. - - - InfluxDB Cloud only allows access to the API token value immediately - after the authorization is created. - - - You can’t change access (read/write) permissions for an API token - after it’s created. - + - InfluxDB Cloud only allows access to the API token value immediately after the authorization is created. + - You can’t change access (read/write) permissions for an API token after it’s created. - Tokens stop working when the user who created the token is deleted. - We recommend the following for managing your tokens: - - Create a generic user to create and manage tokens for writing data. - - Store your tokens in a secure password vault for future access. - #### Related guides - - - [Create a token](/influxdb/cloud/security/tokens/create-token/). + - [Create a token](/influxdb/cloud/security/tokens/create-token/) operationId: PostAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' @@ -6970,7 +6614,8 @@ paths: application/json: schema: $ref: '#/components/schemas/Authorization' - description: Authorization created + description: | + Success. The authorization is created. The response body contains the authorization. '400': $ref: '#/components/responses/GeneralServerError' description: Invalid request @@ -7094,33 +6739,23 @@ paths: - Authorizations /api/v2/buckets: get: - description: > - Retrieves a list of - [buckets](/influxdb/cloud/reference/glossary/#bucket). - - - To limit which buckets are returned, pass query parameters in your - request. - - If no query parameters are passed, InfluxDB returns all buckets up to - the + description: | + Retrieves a list of [buckets](/influxdb/cloud/reference/glossary/#bucket). + To limit which buckets are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all buckets up to the default `limit`. 
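+
+ For example, the following sketch lists buckets that belong to an
+ organization, assuming the `org` query parameter (the host, token, and
+ organization name are placeholder values):
+
+ ```sh
+ # INFLUX_ORG is a placeholder for the organization name.
+ curl --request GET "http://localhost:8086/api/v2/buckets?org=INFLUX_ORG" \
+   --header "Authorization: Token INFLUX_API_TOKEN" \
+   --header "Accept: application/json"
+ ```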
- #### Limitations - - - Paging with an `offset` greater than the number of records will result - in - + - Paging with an `offset` greater than the number of records will result in an empty list of buckets--for example: The following request is paging to the 50th record, but the user only has 10 buckets. ```sh - $ curl --request GET "INFLUX_URL/api/v2/scripts?limit=1&offset=50" + $ curl --request GET "INFLUX_URL/api/v2/buckets?limit=1&offset=50" $ { "links": { @@ -7133,7 +6768,6 @@ paths: #### Related Guides - - [Manage buckets](/influxdb/cloud/organizations/buckets/) operationId: GetBuckets parameters: @@ -7217,8 +6851,7 @@ paths: type: system updatedAt: '2022-03-15T17:22:33.726179487Z' links: - self: >- - /api/v2/buckets?descending=false&limit=20&name=_monitoring&offset=0&orgID=ORG_ID + self: /api/v2/buckets?descending=false&limit=20&name=_monitoring&offset=0&orgID=ORG_ID schema: $ref: '#/components/schemas/Buckets' description: | @@ -7240,56 +6873,36 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request GET - "http://localhost:8086/api/v2/buckets?name=_monitoring" \ + source: | + curl --request GET "http://localhost:8086/api/v2/buckets?name=_monitoring" \ --header "Authorization: Token INFLUX_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" post: - description: > + description: | Creates a [bucket](/influxdb/cloud/reference/glossary/#bucket) - and returns the created bucket along with metadata. The default data - [retention period](/influxdb/cloud/reference/glossary/#retention-period) - is 30 days. - #### InfluxDB OSS - - A single InfluxDB OSS instance supports active writes or queries for - - approximately 20 buckets across all organizations at a given time. - Reading - + approximately 20 buckets across all organizations at a given time. Reading or writing to more than 20 buckets at a time can adversely affect - performance. - #### Limitations - - InfluxDB Cloud Free Plan allows users to create up to two buckets. - Exceeding the bucket quota will result in an HTTP `403` status code. - For additional information regarding InfluxDB Cloud offerings, see - - [InfluxDB Cloud - Pricing](https://www.influxdata.com/influxdb-cloud-pricing/). - + [InfluxDB Cloud Pricing](https://www.influxdata.com/influxdb-cloud-pricing/). #### Related Guides - - [Create bucket](/influxdb/cloud/organizations/buckets/create-bucket/) - - - [Create bucket CLI - reference](/influxdb/cloud/reference/cli/influx/bucket/create) + - [Create bucket CLI reference](/influxdb/cloud/reference/cli/influx/bucket/create) operationId: PostBuckets parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7401,13 +7014,11 @@ paths: }' /api/v2/buckets/{bucketID}: delete: - description: > + description: | Deletes a bucket and all associated records. - #### InfluxDB Cloud - - Does the following when you send a delete request: 1. Validates the request and queues the delete. @@ -7416,23 +7027,16 @@ paths: #### InfluxDB OSS - - Validates the request, handles the delete synchronously, - and then responds with success or failure. - #### Limitations - - Only one bucket can be deleted per request. 
- #### Related Guides - - - [Delete a - bucket](/influxdb/cloud/organizations/buckets/delete-bucket/#delete-a-bucket-in-the-influxdb-ui) + - [Delete a bucket](/influxdb/cloud/organizations/buckets/delete-bucket/#delete-a-bucket-in-the-influxdb-ui) operationId: DeleteBucketsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7499,9 +7103,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request DELETE - "http://localhost:8086/api/v2/buckets/BUCKET_ID" \ + source: | + curl --request DELETE "http://localhost:8086/api/v2/buckets/BUCKET_ID" \ --header "Authorization: Token INFLUX_TOKEN" \ --header 'Accept: application/json' get: @@ -7579,36 +7182,24 @@ paths: tags: - Buckets patch: - description: > + description: | Updates a bucket. - Use this endpoint to update properties - (`name`, `description`, and `retentionRules`) of a bucket. - #### InfluxDB Cloud - - - Requires the `retentionRules` property in the request body. If you - don't - - provide `retentionRules`, InfluxDB responds with an HTTP `403` status - code. - + - Requires the `retentionRules` property in the request body. If you don't + provide `retentionRules`, InfluxDB responds with an HTTP `403` status code. #### InfluxDB OSS - - Doesn't require `retentionRules`. - #### Related Guides - - - [Update a - bucket](/influxdb/cloud/organizations/buckets/update-bucket/) + - [Update a bucket](/influxdb/cloud/organizations/buckets/update-bucket/) operationId: PatchBucketsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7659,17 +7250,13 @@ paths: application/json: examples: invalidJSONStringValue: - description: > - If the request body contains invalid JSON, InfluxDB returns - `invalid` - + description: | + If the request body contains invalid JSON, InfluxDB returns `invalid` with detail about the problem. summary: Invalid JSON value: code: invalid - message: >- - invalid json: invalid character '\'' looking for beginning - of value + message: 'invalid json: invalid character ''\'''' looking for beginning of value' schema: $ref: '#/components/schemas/Error' description: | @@ -7681,10 +7268,8 @@ paths: application/json: examples: invalidRetention: - summary: > - The retention policy provided exceeds the max retention for - the - + summary: | + The retention policy provided exceeds the max retention for the organization. value: code: forbidden @@ -7722,9 +7307,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request PATCH "http://localhost:8086/api/v2/buckets/BUCKET_ID - \ + source: | + curl --request PATCH "http://localhost:8086/api/v2/buckets/BUCKET_ID \ --header "Authorization: Token INFLUX_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ @@ -7740,34 +7324,20 @@ paths: }' /api/v2/buckets/{bucketID}/labels: get: - description: > + description: | Retrieves a list of all labels for a bucket. - - Labels are objects that contain `labelID`, `name`, `description`, and - `color` - + Labels are objects that contain `labelID`, `name`, `description`, and `color` key-value pairs. They may be used for grouping and filtering InfluxDB - resources. - - Labels are also capable of grouping across different resources--for - example, - - you can apply a label named `air_sensor` to a bucket and a task to - quickly - + Labels are also capable of grouping across different resources--for example, + you can apply a label named `air_sensor` to a bucket and a task to quickly organize resources. 
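+
+ For example, the following sketch lists labels for a bucket (the host,
+ token, and `BUCKET_ID` are placeholder values):
+
+ ```sh
+ # BUCKET_ID is a placeholder for the bucket ID.
+ curl --request GET "http://localhost:8086/api/v2/buckets/BUCKET_ID/labels" \
+   --header "Authorization: Token INFLUX_API_TOKEN" \
+   --header "Accept: application/json"
+ ```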
- #### Related guides - - - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to - retrieve and manage labels. - - - [Manage labels in the InfluxDB - UI](/influxdb/cloud/visualize-data/labels/) + - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to retrieve and manage labels. + - [Manage labels in the InfluxDB UI](/influxdb/cloud/visualize-data/labels/) operationId: GetBucketsIDLabels parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7814,36 +7384,24 @@ paths: tags: - Buckets post: - description: > + description: | Adds a label to a bucket and returns the new label information. - - Labels are objects that contain `labelID`, `name`, `description`, and - `color` - - key-value pairs. They may be used for grouping and filtering across one - or - + Labels are objects that contain `labelID`, `name`, `description`, and `color` + key-value pairs. They may be used for grouping and filtering across one or more kinds of **resources**--for example, you can apply a label named - `air_sensor` to a bucket and a task to quickly organize resources. - #### Limitations - - Before adding a label to a bucket, you must create the label if you haven't already. To create a label with the InfluxDB API, send a `POST` request to the [`/api/v2/labels` endpoint](#operation/PostLabels)). #### Related guides - - - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to - retrieve and manage labels. - - - [Manage labels in the InfluxDB - UI](/influxdb/cloud/visualize-data/labels/) + - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to retrieve and manage labels. + - [Manage labels in the InfluxDB UI](/influxdb/cloud/visualize-data/labels/) operationId: PostBucketsIDLabels parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7921,9 +7479,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "http://localhost:8086/api/v2/buckets/BUCKETS_ID/labels \ + source: | + curl --request POST "http://localhost:8086/api/v2/buckets/BUCKETS_ID/labels \ --header "Authorization: Token INFLUX_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ @@ -8091,7 +7648,7 @@ paths: $ref: '#/components/responses/BadRequestError' examples: invalidRequest: - summary: The `userID` is missing from the request body. + summary: The user `id` is missing from the request body. value: code: invalid message: user id missing or invalid @@ -8113,10 +7670,9 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "http://localhost:8086/api/v2/buckets/BUCKET_ID/members \ - --header "Authorization: Token INFLUX_TOKEN" \ + source: | + curl --request POST "http://localhost:8086/api/v2/buckets/BUCKET_ID/members \ + --header "Authorization: Token INFLUX_API_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ --data '{ @@ -8124,21 +7680,15 @@ paths: } /api/v2/buckets/{bucketID}/members/{userID}: delete: - description: > + description: | Removes a member from a bucket. - - Use this endpoint to remove a user's member privileges from a bucket. - This - + Use this endpoint to remove a user's member privileges from a bucket. This removes the user's `read` and `write` permissions for the bucket. 
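+
+ For example, the following sketch removes a member (the host, token,
+ `BUCKET_ID`, and `USER_ID` are placeholder values):
+
+ ```sh
+ # BUCKET_ID and USER_ID are placeholders for the bucket and user IDs.
+ curl --request DELETE "http://localhost:8086/api/v2/buckets/BUCKET_ID/members/USER_ID" \
+   --header "Authorization: Token INFLUX_API_TOKEN"
+ ```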
- #### Related guides - - [Manage users](/influxdb/cloud/users/) - - [Manage members](/influxdb/cloud/organizations/members/) operationId: DeleteBucketsIDMembersID parameters: @@ -8179,10 +7729,42 @@ paths: - Buckets /api/v2/buckets/{bucketID}/owners: get: + description: | + Retrieves a list of all [owners](/influxdb/cloud/reference/glossary/#owner) + for a bucket. + + Bucket owners have permission to delete buckets and remove user and member + permissions from the bucket. + + #### InfluxDB Cloud + + - Doesn't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. + + #### Limitations + + - Owner permissions are separate from API token permissions. + - Owner permissions are used in the context of the InfluxDB UI. + + #### Required permissions + + - `read-orgs INFLUX_ORG_ID` + + `INFLUX_ORG_ID` is the ID of the organization that you want to retrieve a + list of owners for. + + #### Related endpoints + + - [Authorizations](#tag/Authorizations) + + #### Related guides + + - [Manage users](/influxdb/cloud/users/) operationId: GetBucketsIDOwners parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The bucket ID. + - description: | + The ID of the bucket to retrieve owners for. in: path name: bucketID required: true @@ -8192,9 +7774,31 @@ paths: '200': content: application/json: + examples: + successResponse: + value: + links: + self: /api/v2/buckets/BUCKET_ID/owners + users: + - id: d88d182d91b0950f + links: + self: /api/v2/users/d88d182d91b0950f + name: example-owner + role: owner + status: active schema: $ref: '#/components/schemas/ResourceOwners' - description: A list of bucket owners + description: | + Success. + The response body contains a list of all owners for the bucket. + '400': + $ref: '#/components/responses/BadRequestError' + '401': + $ref: '#/components/responses/AuthorizationError' + '404': + $ref: '#/components/responses/ResourceNotFoundError' + '500': + $ref: '#/components/responses/InternalServerError' default: content: application/json: @@ -8205,10 +7809,41 @@ paths: tags: - Buckets post: + description: | + Adds an owner to a bucket and returns the [owners](/influxdb/cloud/reference/glossary/#owner) + with role and user detail. + + Use this endpoint to create a _resource owner_ for the bucket. + Bucket owners have permission to delete buckets and remove user and member + permissions from the bucket. + + #### InfluxDB Cloud + + - Doesn't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. + + #### Limitations + + - Owner permissions are separate from API token permissions. + - Owner permissions are used in the context of the InfluxDB UI. + + #### Required permissions + + - `write-orgs INFLUX_ORG_ID` + `INFLUX_ORG_ID` is the ID of the organization that you want add an owner for. + + #### Related endpoints + + - [Authorizations](#tag/Authorizations) + + #### Related guides + + - [Manage users](/influxdb/cloud/users/) operationId: PostBucketsIDOwners parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The bucket ID. + - description: | + The ID of the bucket to add an owner for. 
in: path name: bucketID required: true @@ -8217,9 +7852,18 @@ paths: requestBody: content: application/json: + examples: + successResponse: + value: + id: d88d182d91b0950f + links: + self: /api/v2/users/d88d182d91b0950f + name: example-user + role: owner + status: active schema: $ref: '#/components/schemas/AddResourceMemberRequestBody' - description: User to add as owner + description: A user to add as an owner for the bucket. required: true responses: '201': @@ -8227,7 +7871,25 @@ paths: application/json: schema: $ref: '#/components/schemas/ResourceOwner' - description: Success. The user is an owner of the bucket + description: | + Created. + The bucket `owner` role is assigned to the user. + The response body contains the resource owner with + role and user detail. + '400': + $ref: '#/components/responses/BadRequestError' + examples: + invalidRequest: + summary: The user `id` is missing from the request body. + value: + code: invalid + message: user id missing or invalid + '401': + $ref: '#/components/responses/AuthorizationError' + '404': + $ref: '#/components/responses/ResourceNotFoundError' + '500': + $ref: '#/components/responses/InternalServerError' default: content: application/json: @@ -8237,18 +7899,59 @@ paths: summary: Add an owner to a bucket tags: - Buckets + x-codeSamples: + - label: cURL + lang: Shell + source: | + curl --request POST "http://localhost:8086/api/v2/buckets/BUCKET_ID/owners \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data '{ + "id": "09cfb87051cbe000" + } /api/v2/buckets/{bucketID}/owners/{userID}: delete: + description: | + Removes an owner from a bucket. + + Use this endpoint to remove a user's `owner` role for a bucket. + + #### InfluxDB Cloud + + - Doesn't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. + + #### Limitations + + - Owner permissions are separate from API token permissions. + - Owner permissions are used in the context of the InfluxDB UI. + + #### Required permissions + + - `write-orgs INFLUX_ORG_ID` + `INFLUX_ORG_ID` is the ID of the organization that you want to remove an owner + from. + + #### Related endpoints + + - [Authorizations](#tag/Authorizations) + + #### Related guides + + - [Manage users](/influxdb/cloud/users/) operationId: DeleteBucketsIDOwnersID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the owner to remove. + - description: | + The ID of the owner to remove. in: path name: userID required: true schema: type: string - - description: The bucket ID. + - description: | + The ID of the bucket to remove an owner from. in: path name: bucketID required: true @@ -8256,7 +7959,15 @@ paths: type: string responses: '204': - description: Owner removed + description: | + Success. + The user is no longer an owner of the bucket. + '401': + $ref: '#/components/responses/AuthorizationError' + '404': + $ref: '#/components/responses/ResourceNotFoundError' + '500': + $ref: '#/components/responses/InternalServerError' default: content: application/json: @@ -8350,42 +8061,31 @@ paths: application/json: examples: badNameExample: - description: >- - The error returned when the name is invalid, such as too few - or too many characters or the name contains non-printable - ASCII or is not valid UTF-8. 
+ description: The error returned when the name is invalid, such as too few or too many characters or the name contains non-printable ASCII or is not valid UTF-8. summary: Invalid name value: code: invalid message: name is invalid duplicateColumnNamesExample: - description: >- - The error returned when the request body contains duplicate - column names. + description: The error returned when the request body contains duplicate column names. summary: Duplicate column names value: code: invalid message: Duplicate column names missingColumnsExample: - description: >- - The error returned when the request body is missing the - columns property. + description: The error returned when the request body is missing the columns property. summary: Missing columns value: code: invalid message: columns is required missingFieldExample: - description: >- - The error returned when the request body is missing at least - one field type column. + description: The error returned when the request body is missing at least one field type column. summary: Missing field value: code: invalid message: At least one field column is required missingTimestampExample: - description: >- - The error returned when the request body is missing a - timestamp type column. + description: The error returned when the request body is missing a timestamp type column. summary: Missing timestamp value: code: invalid @@ -8480,9 +8180,7 @@ paths: application/json: examples: missingColumnsExample: - description: >- - The error returned when the request body does not contain - all the columns from the source. + description: The error returned when the request body does not contain all the columns from the source. summary: Deleted columns value: code: invalid @@ -8830,9 +8528,7 @@ paths: maximum: 100 minimum: -1 type: integer - - description: >- - A user identifier. Returns only dashboards where this user has the - `owner` role. + - description: A user identifier. Returns only dashboards where this user has the `owner` role. in: query name: owner schema: @@ -8846,9 +8542,7 @@ paths: - CreatedAt - UpdatedAt type: string - - description: >- - A list of dashboard identifiers. Returns only the listed dashboards. - If both `id` and `owner` are specified, only `id` is used. + - description: A list of dashboard identifiers. Returns only the listed dashboards. If both `id` and `owner` are specified, only `id` is used. in: query name: id schema: @@ -8998,9 +8692,7 @@ paths: properties: cells: $ref: '#/components/schemas/CellWithViewProperties' - description: >- - optional, when provided will replace all existing cells with - the cells provided + description: optional, when provided will replace all existing cells with the cells provided description: description: optional, when provided will replace the description type: string @@ -9075,9 +8767,7 @@ paths: - Cells - Dashboards put: - description: >- - Replaces all cells in a dashboard. This is used primarily to update the - positional information of all cells. + description: Replaces all cells in a dashboard. This is used primarily to update the positional information of all cells. operationId: PutDashboardsIDCells parameters: - $ref: '#/components/parameters/TraceSpan' @@ -9153,9 +8843,7 @@ paths: - Cells - Dashboards patch: - description: >- - Updates the non positional information related to a cell. Updates to a - single cell's positional data could cause grid conflicts. + description: Updates the non positional information related to a cell. 
Updates to a single cell's positional data could cause grid conflicts. operationId: PatchDashboardsIDCellsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -9793,17 +9481,13 @@ paths: - DBRPs /api/v2/delete: post: - description: > + description: | Deletes data from a bucket. - - Use this endpoint to delete points from a bucket in a specified time - range. - + Use this endpoint to delete points from a bucket in a specified time range. #### InfluxDB Cloud - - Does the following when you send a delete request: 1. Validates the request and queues the delete. @@ -9812,103 +9496,74 @@ paths: #### InfluxDB OSS - - Validates the request, handles the delete synchronously, and then responds with success or failure. #### Required permissions - - `write-buckets` or `write-bucket BUCKET_ID`. `BUCKET_ID` is the ID of the destination bucket. #### Rate limits (with InfluxDB Cloud) - `write` rate limits apply. - - For more information, see [limits and adjustable - quotas](/influxdb/cloud/account-management/limits/). - + For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - - [Delete data](/influxdb/cloud/write-data/delete-data/). - - - Learn how to use [delete predicate - syntax](/influxdb/cloud/reference/syntax/delete-predicate/). - - - Learn how InfluxDB handles [deleted - tags](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/schema/measurementtagkeys/) + - [Delete data](/influxdb/cloud/write-data/delete-data/) + - Learn how to use [delete predicate syntax](/influxdb/cloud/reference/syntax/delete-predicate/). + - Learn how InfluxDB handles [deleted tags](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/schema/measurementtagkeys/) and [deleted fields](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/schema/measurementfieldkeys/). operationId: PostDelete parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > + - description: | The organization to delete data from. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Deletes data from the bucket in the organization associated with - the authorization (API token). - + - Deletes data from the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: org schema: description: The organization name or ID. type: string - - description: > + - description: | The name or ID of the bucket to delete data from. - - If you pass both `bucket` and `bucketID`, `bucketID` takes - precedence. + If you pass both `bucket` and `bucketID`, `bucketID` takes precedence. in: query name: bucket schema: description: The bucket name or ID. type: string - - description: > + - description: | The ID of the organization to delete data from. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Deletes data from the bucket in the organization associated with - the authorization (API token). - + - Deletes data from the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: orgID schema: description: The organization ID. type: string - - description: > + - description: | The ID of the bucket to delete data from. - - If you pass both `bucket` and `bucketID`, `bucketID` takes - precedence. 
+ If you pass both `bucket` and `bucketID`, `bucketID` takes precedence. in: query name: bucketID schema: @@ -9919,61 +9574,38 @@ paths: application/json: schema: $ref: '#/components/schemas/DeletePredicateRequest' - description: > + description: | Time range parameters and an optional **delete predicate expression**. - To select points to delete within the specified time range, pass a - - **delete predicate expression** in the `predicate` property of the - request body. - - If you don't pass a `predicate`, InfluxDB deletes all data with - timestamps - + **delete predicate expression** in the `predicate` property of the request body. + If you don't pass a `predicate`, InfluxDB deletes all data with timestamps in the specified time range. - #### Related guides - - - [Delete data](/influxdb/cloud/write-data/delete-data/). - - - Learn how to use [delete predicate - syntax](/influxdb/cloud/reference/syntax/delete-predicate/). + - [Delete data](/influxdb/cloud/write-data/delete-data/) + - Learn how to use [delete predicate syntax](/influxdb/cloud/reference/syntax/delete-predicate/). required: true responses: '204': - description: > + description: | Success. - #### InfluxDB Cloud - - Validated and queued the request. + - Handles the delete asynchronously - the deletion might not have completed yet. - - Handles the delete asynchronously - the deletion might not have - completed yet. - - - An HTTP `2xx` status code acknowledges that the write or delete is - queued. - - To ensure that InfluxDB Cloud handles writes and deletes in the - order you request them, - + An HTTP `2xx` status code acknowledges that the write or delete is queued. + To ensure that InfluxDB Cloud handles writes and deletes in the order you request them, wait for a response before you send the next request. - Because writes are asynchronous, data might not yet be written - when you receive the response. - #### InfluxDB OSS - - Deleted the data. '400': content: @@ -9986,17 +9618,13 @@ paths: message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if `org` or `orgID` doesn't match an - organization. '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -10012,9 +9640,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - INFLUX_URL/api/v2/delete?org=INFLUX_ORG&bucket=INFLUX_BUCKET \ + source: | + curl --request POST "INFLUX_URL/api/v2/delete?org=INFLUX_ORG&bucket=INFLUX_BUCKET" \ --header 'Authorization: Token INFLUX_API_TOKEN' \ --header 'Content-Type: application/json' \ --data '{ @@ -10202,9 +9829,7 @@ paths: application/json: schema: $ref: '#/components/schemas/UserResponse' - description: >- - Success. The response body contains the currently authenticated - user. + description: Success. The response body contains the currently authenticated user. '401': $ref: '#/components/responses/AuthorizationError' '500': @@ -10217,13 +9842,50 @@ paths: /api/v2/me/password: put: description: | + Updates the password for the signed-in [user](/influxdb/cloud/reference/glossary/#user). + + This endpoint represents the third step in the following three-step process to let a + user with a user session update their password: + 1.
Pass the user's [Basic authentication credentials](#section/Authentication/BasicAuthentication) to the `POST /api/v2/signin` + endpoint to create a user session and generate a session cookie. + 2. From the response in the first step, extract the session cookie (`Set-Cookie`) header. + 3. Pass the following in a request to the `PUT /api/v2/me/password` endpoint: + - The `Set-Cookie` header from the second step + - The `Authorization Basic` header with the user's _Basic authentication_ credentials + - `{"password": "NEW_PASSWORD"}` in the request body + #### InfluxDB Cloud - InfluxDB Cloud doesn't support changing user passwords through the API. - Use the InfluxDB Cloud user interface to update your password. + - Doesn't allow you to manage passwords through the API. + Use the InfluxDB Cloud user interface (UI) to update your password. + + #### Related endpoints + + - [Signin](#tag/Signin) + - [Signout](#tag/Signout) + - [Users](#tag/Users) + + #### Related guides + + - [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/) + - [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/) operationId: PutMePassword parameters: - $ref: '#/components/parameters/TraceSpan' + - description: | + The user session cookie for the + [user](/influxdb/cloud/reference/glossary/#user) + signed in with [Basic authentication credentials](#section/Authentication/BasicAuthentication). + + #### Related guides + + - [Manage users](/influxdb/cloud/users/) + example: influxdb-oss-session=19aaaZZZGOvP2GGryXVT2qYftlFKu3bIopurM6AGFow1yF1abhtOlbHfsc-d8gozZFC_6WxmlQIAwLMW5xs523w== + in: cookie + name: influxdb-oss-session + required: true + schema: + type: string requestBody: content: application/json: @@ -10233,13 +9895,20 @@ paths: required: true responses: '204': - description: Success. The password was updated. + description: Success. The password is updated. '400': - description: > + description: | Bad request. - InfluxDB Cloud doesn't support changing passwords through the API - and always responds with this status. + #### InfluxDB Cloud + + - Doesn't allow you to manage passwords through the API; always responds with this status. + + #### InfluxDB OSS + + - Doesn't understand a value passed in the request. + '401': + $ref: '#/components/responses/AuthorizationError' default: content: application/json: @@ -10258,9 +9927,7 @@ paths: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/Offset' - $ref: '#/components/parameters/Limit' - - description: >- - Only show notification endpoints that belong to specific - organization ID. + - description: Only show notification endpoints that belong to a specific organization ID. in: query name: orgID required: true schema: @@ -10542,9 +10209,7 @@ paths: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/Offset' - $ref: '#/components/parameters/Limit' - - description: >- - Only show notification rules that belong to a specific organization - ID. + - description: Only show notification rules that belong to a specific organization ID. in: query name: orgID required: true @@ -10555,9 +10220,7 @@ paths: name: checkID schema: type: string - - description: >- - Only return notification rules that "would match" statuses which - contain the tag key value pairs provided. + - description: Only return notification rules that "would match" statuses which contain the tag key-value pairs provided.
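To make the three-step password-change flow above concrete, here is a minimal sketch of the full sequence against InfluxDB OSS. It assumes a local instance at `localhost:8086`; `USERNAME`, `PASSWORD`, and `NEW_PASSWORD` are placeholders, and the cookie-jar file name is arbitrary:

```sh
# Step 1: authenticate with Basic credentials; curl encodes them and
# stores the session cookie from the Set-Cookie response header.
curl --request POST http://localhost:8086/api/v2/signin \
  --user "USERNAME:PASSWORD" \
  --cookie-jar ./influxdb-session.txt

# Steps 2 and 3: send the session cookie, the Basic credentials,
# and the new password in the request body.
curl --request PUT http://localhost:8086/api/v2/me/password \
  --user "USERNAME:PASSWORD" \
  --cookie ./influxdb-session.txt \
  --header 'Content-Type: application/json' \
  --data '{"password": "NEW_PASSWORD"}'
```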
in: query name: tag schema: @@ -10874,29 +10537,19 @@ paths: - Rules /api/v2/orgs: get: - description: > - Retrieves a list of - [organizations](/influxdb/cloud/reference/glossary/#organization/). - - - To limit which organizations are returned, pass query parameters in your - request. - - If no query parameters are passed, InfluxDB returns all organizations up - to the default `limit`. + description: | + Retrieves a list of [organizations](/influxdb/cloud/reference/glossary/#organization/). + To limit which organizations are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all organizations up to the default `limit`. #### InfluxDB Cloud - - - Only returns the organization that owns the token passed in the - request. - + - Only returns the organization that owns the token passed in the request. #### Related guides - - - [View organizations](/influxdb/cloud/organizations/view-orgs/). + - [View organizations](/influxdb/cloud/organizations/view-orgs/) operationId: GetOrgs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -10967,22 +10620,16 @@ paths: - Organizations - Security and access endpoints post: - description: > - Creates an - [organization](/influxdb/cloud/reference/glossary/#organization) - + description: | + Creates an [organization](/influxdb/cloud/reference/glossary/#organization) and returns the newly created organization. - #### InfluxDB Cloud - - Doesn't allow you to use this endpoint to create organizations. - #### Related guides - - [Manage organizations](/influxdb/cloud/organizations) operationId: PostOrgs parameters: @@ -11081,7 +10728,7 @@ paths: #### Related guides - - [Delete organization](/influxdb/cloud/organizations/delete-orgs/) + - [Delete organizations](/influxdb/cloud/organizations/delete-orgs/) operationId: DeleteOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -11140,7 +10787,7 @@ paths: #### Related guides - - [View organization](/influxdb/cloud/organizations/view-orgs/) + - [View organizations](/influxdb/cloud/organizations/view-orgs/) operationId: GetOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -11190,40 +10837,26 @@ paths: - Organizations - Security and access endpoints patch: - description: > + description: | Updates an organization. - Use this endpoint to update properties - (`name`, `description`) of an organization. - Updating an organization’s name affects all resources that reference the - organization by name, including the following: - - Queries - - Dashboards - - Tasks - - Telegraf configurations - - Templates - - If you change an organization name, be sure to update the organization - name - + If you change an organization name, be sure to update the organization name in these resources as well. - #### Related Guides - - [Update an organization](/influxdb/cloud/organizations/update-org/) operationId: PatchOrgsID parameters: @@ -11297,51 +10930,35 @@ paths: - Limits /api/v2/orgs/{orgID}/members: get: - description: > + description: | Retrieves a list of all users that belong to an organization. - InfluxDB [users](/influxdb/cloud/reference/glossary/#user) have - permission to access InfluxDB. - [Members](/influxdb/cloud/reference/glossary/#member) are users - within the organization. - #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Limitations - - Member permissions are separate from API token permissions. 
- - Member permissions are used in the context of the InfluxDB UI. - #### Required permissions - - `read-orgs INFLUX_ORG_ID` - - `INFLUX_ORG_ID` is the ID of the organization that you retrieve a list - of - + `INFLUX_ORG_ID` is the ID of the organization that you retrieve a list of members from. - #### Related guides - - [Manage users](/influxdb/cloud/users/) - - [Manage members](/influxdb/cloud/organizations/members/) operationId: GetOrgsIDMembers parameters: @@ -11377,11 +10994,9 @@ paths: status: active schema: $ref: '#/components/schemas/ResourceMembers' - description: > + description: | Success. - - The response body contains a list of all users within the - organization. + The response body contains a list of all users within the organization. '400': $ref: '#/components/responses/BadRequestError' '401': @@ -11414,48 +11029,33 @@ paths: - Organizations - Security and access endpoints post: - description: > - Adds a user to an organization. - + description: | + Adds a user to an organization. InfluxDB [users](/influxdb/cloud/reference/glossary/#user) have - permission to access InfluxDB. - [Members](/influxdb/cloud/reference/glossary/#member) are users - within the organization. - #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Limitations - - Member permissions are separate from API token permissions. - - Member permissions are used in the context of the InfluxDB UI. - #### Required permissions - - `write-orgs INFLUX_ORG_ID` - - `INFLUX_ORG_ID` is the ID of the organization that you want add a member - to. - + `INFLUX_ORG_ID` is the ID of the organization that you want to add a member to. #### Related guides - - [Manage users](/influxdb/cloud/users/) - - [Manage members](/influxdb/cloud/organizations/members/) operationId: PostOrgsIDMembers parameters: @@ -11492,7 +11092,7 @@ paths: $ref: '#/components/schemas/ResourceMember' description: | Success. - The response body contains the user. + The response body contains the user information. '400': $ref: '#/components/responses/BadRequestError' examples: @@ -11519,9 +11119,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/members \ + source: | + curl --request POST "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/members" \ --header "Authorization: Token INFLUX_API_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ --data '{ "id": "09cfb87051cbe000" }' /api/v2/orgs/{orgID}/members/{userID}: delete: - description: > + description: | Removes a member from an organization. - - Use this endpoint to remove a user's `read` and `write` permissions for the organization. - + Use this endpoint to remove a user's member privileges from an organization. This + removes the user's `read` and `write` permissions from the organization. #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Limitations - - Member permissions are separate from API token permissions. - - Member permissions are used in the context of the InfluxDB UI. - #### Required permissions - - `write-orgs INFLUX_ORG_ID` - `INFLUX_ORG_ID` is the ID of the organization that you want to remove an - owner from.
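The member-removal operation documented in this hunk ships without an `x-codeSamples` entry. A minimal sketch, assuming a local OSS instance and placeholder `INFLUX_ORG_ID`, `INFLUX_USER_ID`, and `INFLUX_API_TOKEN` values:

```sh
# Remove a member from an organization; a 204 response confirms the removal.
curl --request DELETE \
  "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/members/INFLUX_USER_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```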
- #### Related guides - - [Manage members](/influxdb/cloud/organizations/members/) operationId: DeleteOrgsIDMembersID parameters: @@ -11604,25 +11193,19 @@ paths: - Security and access endpoints /api/v2/orgs/{orgID}/owners: get: - description: > + description: | Retrieves a list of all owners of an organization. - #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Required permissions - - `read-orgs INFLUX_ORG_ID` - - `INFLUX_ORG_ID` is the ID of the organization that you want to retrieve - a - + `INFLUX_ORG_ID` is the ID of the organization that you want to retrieve a list of owners from. operationId: GetOrgsIDOwners parameters: @@ -11670,32 +11253,24 @@ paths: - Organizations - Security and access endpoints post: - description: > + description: | Adds an owner to an organization. - Use this endpoint to assign the organization `owner` role to a user. - #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Required permissions - - `write-orgs INFLUX_ORG_ID` - - `INFLUX_ORG_ID` is the ID of the organization that you want add an owner - for. - + `INFLUX_ORG_ID` is the ID of the organization that you want to add an owner for. #### Related endpoints - - [Authorizations](#tag/Authorizations) operationId: PostOrgsIDOwners parameters: @@ -11751,9 +11326,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/owners \ + source: | + curl --request POST "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/owners" \ --header "Authorization: Token INFLUX_API_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ --data '{ "id": "09cfb87051cbe000" }' /api/v2/orgs/{orgID}/owners/{userID}: delete: - description: > + description: | Removes an [owner](/influxdb/cloud/reference/glossary/#owner) from - the organization. - - Organization owners have permission to delete organizations and remove - user and member - + Organization owners have permission to delete organizations and remove user and member permissions from the organization. - #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Limitations - - Owner permissions are separate from API token permissions. - - Owner permissions are used in the context of the InfluxDB UI. - #### Required permissions - - `write-orgs INFLUX_ORG_ID` - - `INFLUX_ORG_ID` is the ID of the organization that you want remove an - owner - + `INFLUX_ORG_ID` is the ID of the organization that you want to remove an owner from. - #### Related endpoints - - [Authorizations](#tag/Authorizations) operationId: DeleteOrgsIDOwnersID parameters: @@ -11965,22 +11524,18 @@ paths: required: true schema: type: string - - description: > + - description: | Earliest time to include in results. - - For more information about timestamps, see [Manipulate timestamps - with Flux](/influxdb/cloud/query-data/flux/manipulate-timestamps/). + For more information about timestamps, see [Manipulate timestamps with Flux](/influxdb/cloud/query-data/flux/manipulate-timestamps/). in: query name: start required: true schema: format: unix timestamp type: integer - - description: > + - description: | Latest time to include in results.
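The owner-removal operation above likewise has no code sample; an equivalent sketch under the same assumptions as the member example:

```sh
# Remove an owner from an organization; a 204 response confirms the removal.
curl --request DELETE \
  "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/owners/INFLUX_USER_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```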
- - For more information about timestamps, see [Manipulate timestamps - with Flux](/influxdb/cloud/query-data/flux/manipulate-timestamps/). + For more information about timestamps, see [Manipulate timestamps with Flux](/influxdb/cloud/query-data/flux/manipulate-timestamps/). in: query name: stop required: false @@ -11999,25 +11554,16 @@ paths: content: text/csv: schema: - example: > - #group,false,false,true,true,false,false,true,true,true,true - #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string - #default,_result,,,,,,,,, - ,result,table,_start,_stop,_time,_value,_field,_measurement,bucket_id,org_id - ,,0,2021-05-10T14:25:10.865702397Z,2021-05-10T15:25:10.865702397Z,2021-05-10T15:00:00Z,5434066,gauge,storage_usage_bucket_bytes,2f6ba0cf9a2fdcbb,cec6fc1d2176dc11 - ,,1,2021-05-10T14:25:10.865702397Z,2021-05-10T15:25:10.865702397Z,2021-05-10T15:00:00Z,9924053.966666665,gauge,storage_usage_bucket_bytes,8af67bcaf69d9daf,cec6fc1d2176dc11 + example: | + #group,false,false,true,true,false,false,true,true,true,true #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string #default,_result,,,,,,,,, ,result,table,_start,_stop,_time,_value,_field,_measurement,bucket_id,org_id ,,0,2021-05-10T14:25:10.865702397Z,2021-05-10T15:25:10.865702397Z,2021-05-10T15:00:00Z,5434066,gauge,storage_usage_bucket_bytes,2f6ba0cf9a2fdcbb,cec6fc1d2176dc11 ,,1,2021-05-10T14:25:10.865702397Z,2021-05-10T15:25:10.865702397Z,2021-05-10T15:00:00Z,9924053.966666665,gauge,storage_usage_bucket_bytes,8af67bcaf69d9daf,cec6fc1d2176dc11 type: string description: Usage data headers: Content-Encoding: - description: >- - Lists any encodings (usually compression algorithms) that have - been applied to the response payload. + description: Lists any encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity - description: >- - The content coding. `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -12074,44 +11620,29 @@ paths: - Ping /api/v2/query: post: - description: > + description: | Retrieves data from buckets. - - Use this endpoint to send a Flux query request and retrieve data from a - bucket. - + Use this endpoint to send a Flux query request and retrieve data from a bucket. #### Rate limits (with InfluxDB Cloud) - `read` rate limits apply. - - For more information, see [limits and adjustable - quotas](/influxdb/cloud/account-management/limits/). - + For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - - [Query with the InfluxDB - API](/influxdb/cloud/query-data/execute-queries/influx-api/). - - - [Get started with - Flux](https://docs.influxdata.com/flux/v0.x/get-started/) + - [Query with the InfluxDB API](/influxdb/cloud/query-data/execute-queries/influx-api/) + - [Get started with Flux](https://docs.influxdata.com/flux/v0.x/get-started/) operationId: PostQuery parameters: - $ref: '#/components/parameters/TraceSpan' - - description: >- - The content encoding (usually a compression algorithm) that the - client can understand. + - description: The content encoding (usually a compression algorithm) that the client can understand. in: header name: Accept-Encoding schema: default: identity - description: >- - The content coding. 
Use `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -12123,43 +11654,31 @@ paths: - application/json - application/vnd.flux type: string - - description: > + - description: | The name or ID of the organization executing the query. - #### InfluxDB Cloud - - Doesn't use `org` or `orgID`. - - - Queries the bucket in the organization associated with the - authorization (API token). - + - Queries the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: org schema: type: string - - description: > + - description: | The ID of the organization executing the query. - #### InfluxDB Cloud - - Doesn't use `org` or `orgID`. - - - Queries the bucket in the organization associated with the - authorization (API token). - + - Queries the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: orgID @@ -12182,27 +11701,21 @@ paths: '200': content: application/csv: - example: > + example: | result,table,_start,_stop,_time,region,host,_value - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 schema: type: string description: Success. The response body contains query results. headers: Content-Encoding: - description: >- - Lists encodings (usually compression algorithms) that have been - applied to the response payload. + description: Lists encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity - description: > - The content coding: `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: | + The content coding: `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -12223,17 +11736,13 @@ paths: message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if `org` or `orgID` doesn't match an - organization. + - Returns this error if `org` or `orgID` doesn't match an organization. '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -12251,9 +11760,7 @@ paths: - doesn't return this error. headers: Retry-After: - description: >- - Non-negative decimal integer indicating seconds to wait before - retrying the request. + description: Non-negative decimal integer indicating seconds to wait before retrying the request. schema: format: int32 type: integer @@ -12278,13 +11785,10 @@ paths: |> filter(fn: (r) => r._measurement == "example-measurement")' /api/v2/query/analyze: post: - description: > - Analyzes a [Flux query](https://docs.influxdata.com/flux/v0.x/) for - syntax - + description: | + Analyzes a [Flux query](https://docs.influxdata.com/flux/v0.x/) for syntax errors and returns the list of errors. - In the following sample query, `from()` is missing the property key. 
```json @@ -12296,14 +11800,10 @@ paths: ``` If you pass this in a request to the `/api/v2/analyze` endpoint, - - InfluxDB returns an `errors` list that contains an error object for the - missing key. - + InfluxDB returns an `errors` list that contains an error object for the missing key. #### Limitations - - The endpoint doesn't validate values in the query--for example: - The following sample query has correct syntax, but contains an incorrect `from()` property key: @@ -12339,22 +11839,17 @@ paths: application/json: examples: missingQueryPropertyKey: - description: > - Returns an error object if the Flux query is missing a - property key. + description: | + Returns an error object if the Flux query is missing a property key. + The following sample query is missing the _`bucket`_ property key: - The following sample query is missing the _`bucket`_ - property key: - - ```json - { - "query": "from(: \"iot_center\")\ - - ... - - } - ``` + ```json + { + "query": "from(: \"iot_center\")\ + ... + } + ``` summary: Missing property key error value: errors: @@ -12364,27 +11859,20 @@ paths: message: missing property key schema: $ref: '#/components/schemas/AnalyzeQueryResponse' - description: > + description: | Success. - The response body contains the list of `errors`. - - If the query syntax is valid, the endpoint returns an empty `errors` - list. + If the query syntax is valid, the endpoint returns an empty `errors` list. '400': content: application/json: examples: invalidJSONStringValue: - description: >- - If the request body contains invalid JSON, returns `invalid` - and problem detail. + description: If the request body contains invalid JSON, returns `invalid` and problem detail. summary: Invalid JSON value: code: invalid - message: >- - invalid json: invalid character '\'' looking for beginning - of value + message: 'invalid json: invalid character ''\'''' looking for beginning of value' schema: $ref: '#/components/schemas/Error' description: | @@ -12403,9 +11891,8 @@ paths: application/json: examples: emptyJSONObject: - description: > - If the request body contains an empty JSON object, returns - `internal error`. + description: | + If the request body contains an empty JSON object, returns `internal error`. summary: Empty JSON object in request body value: code: internal error @@ -12448,34 +11935,20 @@ paths: EOF /api/v2/query/ast: post: - description: > - Analyzes a Flux query and returns a complete package source [Abstract - Syntax - - Tree - (AST)](/influxdb/cloud/reference/glossary/#abstract-syntax-tree-ast) - + description: | + Analyzes a Flux query and returns a complete package source [Abstract Syntax + Tree (AST)](/influxdb/cloud/reference/glossary/#abstract-syntax-tree-ast) for the query. - - Use this endpoint for deep query analysis such as debugging unexpected - query - + Use this endpoint for deep query analysis such as debugging unexpected query results. - - A Flux query AST provides a semantic, tree-like representation with - contextual - - information about the query. The AST illustrates how the query is - distributed - + A Flux query AST provides a semantic, tree-like representation with contextual + information about the query. The AST illustrates how the query is distributed into different components for execution. 
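As a quick orientation for the request shape the AST endpoint expects, here is a minimal sketch that posts a Flux query to `/api/v2/query/ast` inline rather than through a heredoc. It assumes a local OSS instance and a placeholder token; the backslash-escaped quotes keep the Flux string valid JSON:

```sh
# Request the package AST for a short Flux query.
curl --request POST "http://localhost:8086/api/v2/query/ast" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header 'Content-Type: application/json' \
  --data '{"query": "from(bucket: \"example-bucket\") |> range(start: -5m)"}'
```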
- #### Limitations - - The endpoint doesn't validate values in the query--for example: The following sample Flux query has correct syntax, but contains an incorrect `from()` property key: @@ -12495,6 +11968,7 @@ paths: ``` The following code sample shows how to pass the query as JSON in the request body: + ```json { "query": "from(foo: \"iot_center\")\ |> range(start: -90d)\ @@ -12674,9 +12148,7 @@ paths: end: column: 47 line: 1 - source: >- - from(bucket: "example-bucket") |> - range(start: -5m) + source: 'from(bucket: "example-bucket") |> range(start: -5m)' start: column: 1 line: 1 @@ -12687,9 +12159,7 @@ paths: end: column: 108 line: 1 - source: >- - fn: (r) => r._measurement == - "example-measurement" + source: 'fn: (r) => r._measurement == "example-measurement"' start: column: 58 line: 1 @@ -12709,9 +12179,7 @@ paths: end: column: 108 line: 1 - source: >- - fn: (r) => r._measurement == - "example-measurement" + source: 'fn: (r) => r._measurement == "example-measurement"' start: column: 58 line: 1 @@ -12775,9 +12243,7 @@ paths: end: column: 108 line: 1 - source: >- - (r) => r._measurement == - "example-measurement" + source: (r) => r._measurement == "example-measurement" start: column: 62 line: 1 @@ -12820,9 +12286,7 @@ paths: end: column: 109 line: 1 - source: >- - filter(fn: (r) => r._measurement == - "example-measurement") + source: 'filter(fn: (r) => r._measurement == "example-measurement")' start: column: 51 line: 1 @@ -12831,10 +12295,7 @@ paths: end: column: 109 line: 1 - source: >- - from(bucket: "example-bucket") |> - range(start: -5m) |> filter(fn: (r) => - r._measurement == "example-measurement") + source: 'from(bucket: "example-bucket") |> range(start: -5m) |> filter(fn: (r) => r._measurement == "example-measurement")' start: column: 1 line: 1 @@ -12843,10 +12304,7 @@ paths: end: column: 109 line: 1 - source: >- - from(bucket: "example-bucket") |> range(start: - -5m) |> filter(fn: (r) => r._measurement == - "example-measurement") + source: 'from(bucket: "example-bucket") |> range(start: -5m) |> filter(fn: (r) => r._measurement == "example-measurement")' start: column: 1 line: 1 @@ -12856,10 +12314,7 @@ paths: end: column: 109 line: 1 - source: >- - from(bucket: "example-bucket") |> range(start: - -5m) |> filter(fn: (r) => r._measurement == - "example-measurement") + source: 'from(bucket: "example-bucket") |> range(start: -5m) |> filter(fn: (r) => r._measurement == "example-measurement")' start: column: 1 line: 1 @@ -12870,20 +12325,16 @@ paths: type: Package schema: $ref: '#/components/schemas/ASTResponse' - description: > + description: | Success. - - The response body contains an Abstract Syntax Tree (AST) of the Flux - query. + The response body contains an Abstract Syntax Tree (AST) of the Flux query. '400': content: application/json: examples: invalidASTValue: - description: > - If the request body contains a missing property key in - `from()`, - + description: | + If the request body contains a missing property key in `from()`, returns `invalid` and problem detail. summary: Invalid AST value: @@ -12928,48 +12379,31 @@ paths: EOL /api/v2/query/suggestions: get: - description: > + description: | Retrieves a list of Flux query suggestions. Each suggestion contains a - - [Flux - function](https://docs.influxdata.com/flux/v0.x/stdlib/all-functions/) - + [Flux function](https://docs.influxdata.com/flux/v0.x/stdlib/all-functions/) name and parameters. - - Use this endpoint to retrieve a list of Flux query suggestions used in - the - - InfluxDB Flux Query Builder. 
Helper function names have an underscore - (`_`) - + Use this endpoint to retrieve a list of Flux query suggestions used in the + InfluxDB Flux Query Builder. Helper function names have an underscore (`_`) prefix and aren't meant to be used directly in queries--for example: - - **Recommended**: Use `top(n, columns=["_value"], tables=<-)` to sort on a column and keep the top n records instead of `_sortLimit_`. `top` uses the `_sortLimit` helper function. #### Limitations - - Using `/api/v2/query/suggestions/` (note the trailing slash) with cURL - will result in a HTTP `301 Moved Permanently` status code. Please use - `/api/v2/query/suggestions` without a trailing slash. - - When writing a query, avoid using `_functionName()` helper functions - exposed by this endpoint. - #### Related Guides - - - [List of all Flux - functions](/influxdb/cloud/flux/v0.x/stdlib/all-functions/). + - [List of all Flux functions](/influxdb/cloud/flux/v0.x/stdlib/all-functions/) operationId: GetQuerySuggestions parameters: - $ref: '#/components/parameters/TraceSpan' @@ -13563,26 +12997,20 @@ paths: tables: stream schema: $ref: '#/components/schemas/FluxSuggestions' - description: > + description: | Success. - - The response body contains a list of Flux query - suggestions--function - + The response body contains a list of Flux query suggestions--function names used in the Flux Query Builder autocomplete suggestions. '301': content: text/html: examples: movedPermanently: - description: > - The URL has been permanently moved. Use - `/api/v2/query/suggestions`. + description: | + The URL has been permanently moved. Use `/api/v2/query/suggestions`. summary: Invalid URL - value: > - Moved - Permanently + value: | + Moved Permanently schema: properties: body: @@ -13605,45 +13033,30 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request GET - "https://cloud2.influxdata.com/api/v2/query/suggestions" \ + source: | + curl --request GET "https://cloud2.influxdata.com/api/v2/query/suggestions" \ --header "Accept: application/json" \ --header "Authorization: Token INFLUX_API_TOKEN" /api/v2/query/suggestions/{name}: get: - description: > - Retrieves a query suggestion that contains the name and parameters of - the - + description: | + Retrieves a query suggestion that contains the name and parameters of the requested function. - - Use this endpoint to pass a branching suggestion (a Flux function name) - and - + Use this endpoint to pass a branching suggestion (a Flux function name) and retrieve the parameters of the requested function. - #### Limitations - - Use `/api/v2/query/suggestions/{name}` (without a trailing slash). - - `/api/v2/query/suggestions/{name}/` (note the trailing slash) results in - a - + `/api/v2/query/suggestions/{name}/` (note the trailing slash) results in a HTTP `301 Moved Permanently` status. - - The function `name` must exist and must be spelled correctly. - #### Related Guides - - - [List of all Flux - functions](/influxdb/cloud/flux/v0.x/stdlib/all-functions/). 
+ - [List of all Flux functions](/influxdb/cloud/flux/v0.x/stdlib/all-functions/) operationId: GetQuerySuggestionsName parameters: - $ref: '#/components/parameters/TraceSpan' @@ -13693,9 +13106,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request GET - "https://cloud2.influxdata.com/api/v2/query/suggestions/sum/" \ + source: | + curl --request GET "https://cloud2.influxdata.com/api/v2/query/suggestions/sum/" \ --header "Accept: application/json" \ --header "Authorization: Token INFLUX_API_TOKEN" /api/v2/resources: @@ -13724,37 +13136,30 @@ paths: - System information endpoints /api/v2/scripts: get: - description: > - Retrieves a list of - [scripts](/influxdb/cloud/api-guide/api-invokable-scripts/). - + description: | + Retrieves a list of [scripts](/influxdb/cloud/api-guide/api-invokable-scripts/). #### Limitations - - - Paging with an `offset` greater than the number of records will result - in - + - Paging with an `offset` greater than the number of records will result in an empty response--for example: - The following request is paging to the 50th record, but the user has only + The following sample request pages to the 50th record, but the user has only created two scripts. - ```sh - $ curl --request GET "INFLUX_URL/api/v2/scripts?limit=1&offset=50" + ```sh + $ curl --request GET "INFLUX_URL/api/v2/scripts?limit=1&offset=50" - $ {"scripts":[]} - ``` + $ {"scripts":[]} + ``` - #### Related Guides + #### Related guides - - - [Invoke custom - scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) + - [Invoke custom scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) operationId: GetScripts parameters: - description: | - Limits the number of scripts returned. Default is `100`. + The maximum number of scripts to return. Default is `100`. in: query name: limit required: false @@ -13765,7 +13170,7 @@ paths: type: integer - description: | The offset for pagination. - The number of records to skip. + Skips the specified number of records in the result. in: query name: offset required: false @@ -13773,19 +13178,17 @@ paths: default: 0 minimum: 0 type: integer - - description: The name of the script. + - description: The script name. Lists scripts with the specified name. in: query name: name required: false schema: type: string - - description: > + - description: | A list of label names. - - Only returns scripts that have all these labels. - - To retrieve a script, each name you pass in `labelNames` must - exactly match the label for a script. + Only returns scripts that have all the specified labels. + To retrieve a script, each name you pass in `labelNames` must exactly + match the label for a script. in: query name: labelNames required: false @@ -13795,7 +13198,7 @@ paths: type: array - description: | A part of the label name. - Returns scripts that have a label that contains this phrase. + Returns scripts that have a label that contains the specified phrase. 
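The label-filtering parameters described here (`labelNames` and `labelContains`) don't appear in the endpoint's own code sample, which only exercises `limit` and `offset`. A minimal sketch, assuming the Cloud host used elsewhere in these samples, a placeholder token, and hypothetical label values; the sketch passes a single `labelNames` value, since how the array parameter is serialized may vary:

```sh
# List scripts that carry the label "deployed" and whose labels contain "iot".
curl --request GET "https://cloud2.influxdata.com/api/v2/scripts?labelNames=deployed&labelContains=iot" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Accept: application/json"
```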
in: query name: labelContains required: false @@ -13815,9 +13218,7 @@ paths: language: flux name: getLastPointFromSampleBucket orgID: bea7ea952287f70d - script: >- - from(bucket: SampleBucket) |> range(start: -7d) |> - limit(n:1) + script: 'from(bucket: SampleBucket) |> range(start: -7d) |> limit(n:1)' updatedAt: '2022-07-17T23:49:45.731237Z' - createdAt: '2022-07-17T23:43:26.660308Z' description: getLastPoint finds the last point in a bucket @@ -13825,9 +13226,7 @@ paths: language: flux name: getLastPoint orgID: bea7ea952287f70d - script: >- - from(bucket: params.mybucket) |> range(start: -7d) |> - limit(n:1) + script: 'from(bucket: params.mybucket) |> range(start: -7d) |> limit(n:1)' updatedAt: '2022-07-17T23:43:26.660308Z' schema: $ref: '#/components/schemas/Scripts' @@ -13843,13 +13242,13 @@ paths: value: code: 3 details: [] - message: >- - parsing field "limit": strconv.ParseUint: parsing "-1": - invalid syntax + message: 'parsing field "limit": strconv.ParseUint: parsing "-1": invalid syntax' schema: $ref: '#/components/schemas/Error' description: | Bad request. + InfluxDB is unable to parse the request. + The response body contains detail about the error. '401': $ref: '#/components/responses/AuthorizationError' '500': @@ -13867,27 +13266,20 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request GET - "https://cloud2.influxdata.com/api/v2/scripts?limit=100&offset=0" \ + source: | + curl --request GET "https://cloud2.influxdata.com/api/v2/scripts?limit=100&offset=0" \ --header "Authorization: Token INFLUX_API_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" post: - description: > - Creates an [invokable - script](https://docs.influxdata.com/resources/videos/api-invokable-scripts/) + description: | + Creates an [invokable script](https://docs.influxdata.com/resources/videos/api-invokable-scripts/) + and returns the script. - and returns the created script. + #### Related guides - - #### Related Guides - - - - [Invokable scripts](/influxdb/cloud/api-guide/api-invokable-scripts/). - - - [Creating custom InfluxDB - endpoints](https://docs.influxdata.com/resources/videos/api-invokable-scripts/). + - [Invokable scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) + - [Creating custom InfluxDB endpoints](https://docs.influxdata.com/resources/videos/api-invokable-scripts/) operationId: PostScripts requestBody: content: @@ -13909,9 +13301,7 @@ paths: language: flux name: getLastPoint orgID: bea7ea952287f70d - script: >- - from(bucket: params.mybucket) |> range(start: -7d) |> - limit(n:1) + script: 'from(bucket: params.mybucket) |> range(start: -7d) |> limit(n:1)' updatedAt: '2022-07-17T23:43:26.660308Z' schema: $ref: '#/components/schemas/Script' @@ -13919,32 +13309,7 @@ paths: Success. The response body contains the script and its metadata. '400': - content: - application/json: - examples: - invalidCharacterValue: - description: | - If the request body contains an invalid character, returns - `invalid` with detail about the problem. - summary: Invalid character - value: - code: invalid - details: [] - message: invalid character ',' looking for beginning of value - invalidJSONStringValue: - description: | - If the request body contains invalid JSON, returns `invalid` - with detail about the problem. - summary: Invalid JSON - value: - code: invalid - message: >- - invalid json: invalid character '\'' looking for beginning - of value - schema: - $ref: '#/components/schemas/Error' - description: | - Bad request. 
+ $ref: '#/components/responses/BadRequestError' '401': $ref: '#/components/responses/AuthorizationError' '422': @@ -13989,10 +13354,12 @@ paths: }' /api/v2/scripts/{scriptID}: delete: - description: Deletes a script and all associated records. + description: Deletes a [script](/influxdb/cloud/api-guide/api-invokable-scripts/) and all associated records. operationId: DeleteScriptsID parameters: - - description: The ID of the script to delete. + - description: | + A script ID. + Deletes the specified script. in: path name: scriptID required: true @@ -14000,7 +13367,11 @@ paths: type: string responses: '204': - description: The script is deleted. + description: Success. The script is deleted. + '401': + $ref: '#/components/responses/AuthorizationError' + '500': + $ref: '#/components/responses/InternalServerError' default: $ref: '#/components/responses/ServerError' description: Unexpected error @@ -14008,10 +13379,13 @@ paths: tags: - Invokable Scripts get: - description: Uses script ID to retrieve details of an invokable script. + description: | + Retrieves a [script](/influxdb/cloud/api-guide/api-invokable-scripts/). operationId: GetScriptsID parameters: - - description: The script ID. + - description: | + A script ID. + Retrieves the specified script. in: path name: scriptID required: true @@ -14023,21 +13397,28 @@ paths: application/json: schema: $ref: '#/components/schemas/Script' - description: The requested script object. + description: Success. The response body contains the script. + '401': + $ref: '#/components/responses/AuthorizationError' + '500': + $ref: '#/components/responses/InternalServerError' default: $ref: '#/components/responses/ServerError' - description: Unexpected error + description: Unexpected error. summary: Retrieve a script tags: - Data I/O endpoints - Invokable Scripts patch: - description: > - Updates properties (`name`, `description`, and `script`) of an invokable - script. + description: | + Updates a [script](/influxdb/cloud/api-guide/api-invokable-scripts/) and returns the script. + + Use this endpoint to update the properties (`name`, `description`, and `script`) of an invokable script. operationId: PatchScriptsID parameters: - - description: The script ID. + - description: | + A script ID. + Updates the specified script. in: path name: scriptID required: true @@ -14048,7 +13429,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ScriptUpdateRequest' - description: Script update to apply + description: The script update to apply. required: true responses: '200': @@ -14056,7 +13437,13 @@ paths: application/json: schema: $ref: '#/components/schemas/Script' - description: The updated script. + description: Success. The response body contains the updated script. + '400': + $ref: '#/components/responses/BadRequestError' + '401': + $ref: '#/components/responses/AuthorizationError' + '500': + $ref: '#/components/responses/InternalServerError' default: $ref: '#/components/responses/ServerError' description: Unexpected error @@ -14065,49 +13452,38 @@ paths: - Invokable Scripts /api/v2/scripts/{scriptID}/invoke: post: - description: > - Invokes a script and substitutes `params` keys referenced in the script - with - - `params` key-values sent in the request body--for example: - - - The following script contains the parameter _`mybucket`_: + description: | + Runs a script and returns the result. 
+ When the script runs, InfluxDB replaces `params` keys referenced in the script with + `params` key-values passed in the request body--for example: + The following sample script contains a _`mybucket`_ parameter: ```json - "script": "from(bucket: params.mybucket) |> range(start: -7d) |> limit(n:1)" ``` The following example `POST /api/v2/scripts/SCRIPT_ID/invoke` request - body - passes a value for _`mybucket`_: + The following example `POST /api/v2/scripts/SCRIPT_ID/invoke` request body + passes a value for the _`mybucket`_ parameter: ```json - { "params": { "mybucket": "air_sensor" } } - ``` + #### Related guides - #### Related Guides - - [Invoke custom - scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) + - [Invoke custom scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) operationId: PostScriptsIDInvoke parameters: - description: | - Script ID. - Only returns scripts with this ID. + A script ID. + Runs the specified script. in: path name: scriptID required: true schema: type: string responses: '200': content: text/csv: examples: successResponse: - value: > + value: | ,result,table,_start,_stop,_time,_value,_field,_measurement,host - ,_result,0,2019-10-30T01:28:02.52716421Z,2022-07-26T01:28:02.52716421Z,2020-01-01T00:00:00Z,72.01,used_percent,mem,host2 schema: $ref: '#/components/schemas/ScriptHTTPResponseData' @@ -14146,6 +13521,8 @@ $ref: '#/components/schemas/Error' description: | Bad request. + InfluxDB is unable to parse the request. + The response body contains detail about the error. headers: X-Platform-Error-Code: description: | @@ -14160,16 +13537,16 @@ application/json: examples: bucketNotFound: + description: InfluxDB can't find the requested bucket. summary: | - The requested bucket was not found. + Bucket not found value: code: not found - message: >- - failed to initialize execute state: could not find bucket - "test-bucket" + message: 'failed to initialize execute state: could not find bucket "test-bucket"' scriptNotFound: + description: InfluxDB can't find the requested script. summary: | - The requested script was not found. + Script not found value: code: not found message: script "09afa3b220fe400" not found @@ -14199,9 +13576,8 @@ x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID/invoke" \ + source: | + curl --request POST "https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID/invoke" \ --header "Authorization: Token INFLUX_TOKEN" \ --header 'Accept: application/csv' \ --header 'Content-Type: application/json' \ --data '{ @@ -14212,10 +13588,14 @@ }' /api/v2/scripts/{scriptID}/labels/add: patch: - description: Adds labels to a script and returns the updated script. + description: | + Adds labels to a [script](/influxdb/cloud/api-guide/api-invokable-scripts/) + and returns the script. operationId: PatchScriptsIDAddLabels parameters: - - description: The script ID. + - description: | + The script ID. + Adds labels to the specified script. in: path name: scriptID required: true @@ -14236,7 +13616,7 @@ type: string type: array type: object - description: The names of labels to add to the script. + description: The labels to add to the script. required: true responses: '200': content: application/json: schema: $ref: '#/components/schemas/Script' description: | Success. The response body contains the updated script. '400': - $ref: '#/components/responses/ServerError' - description: Bad request.
+ $ref: '#/components/responses/BadRequestError' '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -14260,16 +13639,17 @@ paths: default: $ref: '#/components/responses/ServerError' description: Unexpected error. - summary: Adds labels to a script + summary: Add labels to a script tags: - - Data I/O endpoints - Invokable Scripts /api/v2/scripts/{scriptID}/labels/remove: patch: - description: Removes labels from a script and returns the updated script. + description: Removes labels from a script and returns the script. operationId: PatchScriptsIDRemoveLabels parameters: - - description: The script ID. + - description: | + A script ID. + Removes labels from the specified script. in: path name: scriptID required: true @@ -14290,7 +13670,7 @@ paths: type: string type: array type: object - description: The names of labels to remove from the script. + description: The labels to remove from the script. required: true responses: '200': @@ -14302,8 +13682,7 @@ paths: Success. The response body contains the updated script. '400': - $ref: '#/components/responses/ServerError' - description: Bad request. + $ref: '#/components/responses/BadRequestError' '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -14314,15 +13693,12 @@ paths: default: $ref: '#/components/responses/ServerError' description: Unexpected error. - summary: Removes labels from a script + summary: Remove labels from a script tags: - - Data I/O endpoints - Invokable Scripts /api/v2/setup: get: - description: >- - Check if setup is allowed. Returns `true` if no default user, - organization, or bucket have been created. + description: Check if setup is allowed. Returns `true` if no default user, organization, or bucket have been created. operationId: GetSetup parameters: - $ref: '#/components/parameters/TraceSpan' @@ -14337,9 +13713,7 @@ paths: tags: - Setup post: - description: >- - Post an onboarding request to create an initial user, organization, and - bucket. + description: Post an onboarding request to create an initial user, organization, and bucket. operationId: PostSetup parameters: - $ref: '#/components/parameters/TraceSpan' @@ -14365,9 +13739,7 @@ paths: - Setup /api/v2/setup/user: post: - description: >- - Post an onboarding request to create a new user, organization, and - bucket. + description: Post an onboarding request to create a new user, organization, and bucket. operationId: PostSetupUser requestBody: content: @@ -14391,27 +13763,67 @@ paths: - Setup /api/v2/signin: post: - description: >- - Authenticates ***Basic Auth*** credentials for a user. If successful, - creates a new UI session for the user. + description: | + Authenticates [Basic authentication credentials](#section/Authentication/BasicAuthentication) + for a [user](/influxdb/cloud/reference/glossary/#user), + and then, if successful, generates a user session. + + To authenticate a user, pass the HTTP `Authorization` header with the + `Basic` scheme and the base64-encoded username and password--for example: + + ```sh + Authorization: Basic USERNAME:PASSWORD + ``` + + In InfluxDB Cloud, the username is the email address the user signed up with. + + _Note that many HTTP clients provide a Basic authentication option that + accepts the `USERNAME:PASSWORD` syntax and encodes the credentials before + sending the request. 
+ To learn more about HTTP authentication, see + [Mozilla Developer Network (MDN) Web Docs, HTTP authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication)._ + + If authentication is successful, InfluxDB creates a new session for the user + and then returns the session cookie in the `Set-Cookie` response header. + User sessions exist only in memory. + They expire within ten minutes and during restarts of the InfluxDB instance. + + #### User sessions with authorizations + + - In InfluxDB Cloud, a user session inherits all the user's permissions for + the organization. + - In InfluxDB OSS, a user session inherits all the user's permissions for all + the organizations that the user belongs to. + + #### Related endpoints + + - [Signout](#tag/Signout) operationId: PostSignin parameters: - $ref: '#/components/parameters/TraceSpan' responses: '204': - description: Success. User authenticated. + description: | + Success. + The user is authenticated. + The `Set-Cookie` response header contains the session cookie. '401': content: application/json: schema: $ref: '#/components/schemas/Error' - description: Unauthorized access. + description: | + Unauthorized. + This error may be caused by one of the following problems: + - The user doesn't have access. + - The user passed incorrect credentials in the request. + - The credentials are formatted incorrectly in the request. '403': content: application/json: schema: $ref: '#/components/schemas/Error' - description: User account is disabled. + description: Forbidden. The user account is disabled. default: content: application/json: @@ -14423,6 +13835,12 @@ paths: summary: Create a user session. tags: - Signin + x-codeSamples: + - label: 'cURL: signin with --user option encoding' + lang: Shell + source: | + curl --request POST http://localhost:8086/api/v2/signin \ + --user "USERNAME:PASSWORD" /api/v2/signout: post: description: Expires the current UI session for the user. @@ -14470,21 +13888,15 @@ paths: required: true schema: type: string - - description: > + - description: | The stack name. - Finds stack `events` with this name and returns the stacks. - Repeatable. - To filter for more than one stack name, - repeat this parameter with each name--for example: - - - - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&name=project-stack-0&name=project-stack-1` + - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&name=project-stack-0&name=project-stack-1` examples: findStackByName: summary: Find stacks with the event name @@ -14493,21 +13905,15 @@ paths: name: name schema: type: string - - description: > + - description: | The stack ID. - Only returns stacks with this ID. - Repeatable. 
- To filter for more than one stack ID, - repeat this parameter with each ID--for example: - - - - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&stackID=09bd87cd33be3000&stackID=09bef35081fe3000` + - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&stackID=09bd87cd33be3000&stackID=09bef35081fe3000` examples: findStackByID: summary: Find a stack with the ID @@ -14536,29 +13942,21 @@ paths: summary: The orgID query parameter is missing value: code: invalid - message: >- - organization id[""] is invalid: id must have a length of - 16 bytes + message: 'organization id[""] is invalid: id must have a length of 16 bytes' orgProvidedNotFound: - summary: >- - The org or orgID passed doesn't own the token passed in the - header + summary: The org or orgID passed doesn't own the token passed in the header value: code: invalid message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if an incorrect value is passed for `org` or - `orgID`. + - Returns this error if an incorrect value is passed for `org` or `orgID`. '401': $ref: '#/components/responses/AuthorizationError' '500': @@ -14573,13 +13971,10 @@ paths: tags: - Templates post: - description: > + description: | Creates or initializes a stack. - - Use this endpoint to _manually_ initialize a new stack with the - following - + Use this endpoint to _manually_ initialize a new stack with the following optional information: - Stack name @@ -14587,23 +13982,16 @@ paths: - URLs for template manifest files To automatically create a stack when applying templates, - use the [/api/v2/templates/apply endpoint](#operation/ApplyTemplate). - #### Required permissions - - `write` permission for the organization - #### Related guides - - [InfluxDB stacks](/influxdb/cloud/influxdb-templates/stacks/) - - - [Use InfluxDB - templates](/influxdb/cloud/influxdb-templates/use/#apply-templates-to-an-influxdb-instance) + - [Use InfluxDB templates](/influxdb/cloud/influxdb-templates/use/#apply-templates-to-an-influxdb-instance) operationId: CreateStack requestBody: content: @@ -14638,16 +14026,12 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -14800,73 +14184,67 @@ paths: - Templates /api/v2/tasks: get: - description: > - Retrieves a list of [tasks](/influxdb/cloud/process-data/). + description: | + Retrieves a list of [tasks](/influxdb/cloud/reference/glossary/#task). - - To limit which tasks are returned, pass query parameters in your - request. - - If no query parameters are passed, InfluxDB returns all tasks up to the - default `limit`. + To limit which tasks are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all tasks up to the default `limit`. operationId: GetTasks parameters: - $ref: '#/components/parameters/TraceSpan' - description: | - Task name. - Only returns tasks with this name. 
+ A [task](/influxdb/cloud/reference/glossary/#task) name. + Only returns tasks with the specified name. Different tasks may have the same name. in: query name: name schema: type: string - description: | - Task ID. - Only returns tasks created after this task. + A [task](/influxdb/cloud/reference/glossary/#task) ID. + Only returns tasks created after the specified task. in: query name: after schema: type: string - description: | - User ID. - Only returns tasks owned by this user. + A [user](/influxdb/cloud/reference/glossary/#user) ID. + Only returns tasks owned by the specified user. in: query name: user schema: type: string - description: | - Organization name. - Only returns tasks owned by this organization. + An [organization](/influxdb/cloud/reference/glossary/#organization) name. + Only returns tasks owned by the specified organization. in: query name: org schema: type: string - description: | - Organization ID. - Only returns tasks owned by this organization. + An [organization](/influxdb/cloud/reference/glossary/#organization) ID. + Only returns tasks owned by the specified organization. in: query name: orgID schema: type: string - - in: query + - description: | + A [task](/influxdb/cloud/reference/glossary/#task) status. + Only returns tasks that have the specified status (`active` or `inactive`). + in: query name: status schema: - description: | - Task status (`active` or `inactive`). - Only returns tasks with this status. enum: - active - inactive type: string - - description: > - Limits the number of tasks returned. Default is `100`. + - description: | + The maximum number of [tasks](/influxdb/cloud/reference/glossary/#task) to return. + Default is `100`. + The minimum is `1` and the maximum is `500`. - - To reduce the payload size, combine _`type=basic`_ and _`limit`_ - (see _Request samples_) - - For more information about the `basic` response, see the _`type`_ - parameter. + To reduce the payload size, combine _`type=basic`_ and _`limit`_ (see _Request samples_). + For more information about the `basic` response, see the _`type`_ parameter. examples: all: summary: Return all tasks, without pagination. @@ -14890,8 +14268,8 @@ paths: minimum: 0 type: integer - description: | - The field used for sorting records in the list. - Only `name` is supported. + The sort field. Only `name` is supported. + Specifies the field used to sort records in the list. in: query name: sortBy required: false @@ -14899,15 +14277,12 @@ paths: enum: - name type: string - - description: > - Task type (`basic` or `system`). - - - The default (`system`) response contains all the metadata properties - for tasks. - - To reduce the payload size, pass `basic` to omit some task - properties (`flux`, `createdAt`, `updatedAt`) from the response. + - description: | + A [task](/influxdb/cloud/reference/glossary/#task) type (`basic` or `system`). + Default is `system`. + Specifies the level of detail for tasks in the response. + The default (`system`) response contains all the metadata properties for tasks. + To reduce the response size, pass `basic` to omit some task properties (`flux`, `createdAt`, `updatedAt`). in: query name: type required: false @@ -14918,8 +14293,8 @@ paths: - system type: string - description: | - Script ID. - Only returns tasks that use this script. + A [script](#tag/Invokable-Scripts) ID. + Only returns tasks that use the specified invokable script. 
in: query name: scriptID schema: @@ -14928,6 +14303,71 @@ paths: '200': content: application/json: + examples: + basicTypeTaskOutput: + description: | + A sample response body for the `?type=basic` parameter. + `type=basic` omits some task fields (`createdAt` and `updatedAt`) + and field values (`org`, `flux`) in the response. + summary: Basic output + value: + links: + self: /api/v2/tasks?limit=100 + tasks: + - every: 30m + flux: '' + id: 09956cbb6d378000 + labels: [] + lastRunStatus: success + latestCompleted: '2022-06-30T15:00:00Z' + links: + labels: /api/v2/tasks/09956cbb6d378000/labels + logs: /api/v2/tasks/09956cbb6d378000/logs + members: /api/v2/tasks/09956cbb6d378000/members + owners: /api/v2/tasks/09956cbb6d378000/owners + runs: /api/v2/tasks/09956cbb6d378000/runs + self: /api/v2/tasks/09956cbb6d378000 + name: task1 + org: '' + orgID: 48c88459ee424a04 + ownerID: 0772396d1f411000 + status: active + systemTypeTaskOutput: + description: | + A sample response body for the `?type=system` parameter. + `type=system` returns all task fields. + summary: System output + value: + links: + self: /api/v2/tasks?limit=100 + tasks: + - createdAt: '2022-06-27T15:09:06Z' + description: IoT Center 90-day environment average. + every: 30m + flux: |- + option task = {name: "task1", every: 30m} + + from(bucket: "iot_center") + |> range(start: -90d) + |> filter(fn: (r) => r._measurement == "environment") + |> aggregateWindow(every: 1h, fn: mean) + id: 09956cbb6d378000 + labels: [] + lastRunStatus: success + latestCompleted: '2022-06-30T15:00:00Z' + links: + labels: /api/v2/tasks/09956cbb6d378000/labels + logs: /api/v2/tasks/09956cbb6d378000/logs + members: /api/v2/tasks/09956cbb6d378000/members + owners: /api/v2/tasks/09956cbb6d378000/owners + runs: /api/v2/tasks/09956cbb6d378000/runs + self: /api/v2/tasks/09956cbb6d378000 + name: task1 + org: my-iot-center + orgID: 48c88459ee424a04 + ownerID: 0772396d1f411000 + status: active + updatedAt: '2022-06-28T18:10:15Z' schema: $ref: '#/components/schemas/Tasks' description: | @@ -14946,78 +14386,67 @@ paths: x-codeSamples: - label: 'cURL: all tasks, basic output' lang: Shell - source: > - curl https://cloud2.influxdata.com/api/v2/tasks/?limit=-1&type=basic - \ + source: | + curl https://cloud2.influxdata.com/api/v2/tasks/?limit=-1&type=basic \ --header 'Content-Type: application/json' \ --header 'Authorization: Token INFLUX_API_TOKEN' post: - description: > - Creates a [task](/influxdb/cloud/process-data/) and returns the created - task. - + description: | + Creates a [task](/influxdb/cloud/reference/glossary/#task) and returns the task. Use this endpoint to create a scheduled task that runs a Flux script. - #### InfluxDB Cloud - - You can use either `flux` or `scriptID` to provide the task script. 
- `flux`: a string of "raw" Flux that contains task options and the script--for example: - ```json - { - "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\ - from(bucket: \"telegraf\") - |> range(start: -1h) - |> filter(fn: (r) => (r._measurement == \"cpu\")) - |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\")) - |> filter(fn: (r) => (r.cpu == \"cpu-total\")) - |> aggregateWindow(every: 1h, fn: max) - |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")", - "status": "active", - "description": "This task downsamples CPU data every hour" - } - ``` + ```json + { + "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\ + from(bucket: \"telegraf\") + |> range(start: -1h) + |> filter(fn: (r) => (r._measurement == \"cpu\")) + |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\")) + |> filter(fn: (r) => (r.cpu == \"cpu-total\")) + |> aggregateWindow(every: 1h, fn: max) + |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")", + "status": "active", + "description": "This task downsamples CPU data every hour" + } + ``` - `scriptID`: the ID of an [invokable script](#tag/Invokable-Scripts) for the task to run. To pass task options when using `scriptID`, pass the options as properties in the request body--for example: - ```json - { - "name": "CPU Total 1 Hour New", - "description": "This task downsamples CPU data every hour", - "every": "1h", - "scriptID": "SCRIPT_ID", - "scriptParameters": - { - "rangeStart": "-1h", - "bucket": "telegraf", - "filterField": "cpu-total" - } + ```json + { + "name": "CPU Total 1 Hour New", + "description": "This task downsamples CPU data every hour", + "every": "1h", + "scriptID": "SCRIPT_ID", + "scriptParameters": + { + "rangeStart": "-1h", + "bucket": "telegraf", + "filterField": "cpu-total" } - ``` + } + ``` #### Limitations: - - You can't use `flux` and `scriptID` for the same task. + - You can't use `flux` and `scriptID` for the same task. #### Related guides - - [Get started with tasks](/influxdb/cloud/process-data/get-started/) - - - [Create a - task](/influxdb/cloud/process-data/manage-tasks/create-task/) - + - [Create a task](/influxdb/cloud/process-data/manage-tasks/create-task/) - [Common tasks](/influxdb/cloud/process-data/common-tasks/) - - - [Task configuration - options](/influxdb/cloud/process-data/task-options/) + - [Task configuration options](/influxdb/cloud/process-data/task-options/) operationId: PostTasks parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15034,9 +14463,7 @@ paths: application/json: schema: $ref: '#/components/schemas/Task' - description: >- - Success. The response body contains a `tasks` list with the new - task. + description: Success. The response body contains a `tasks` list with the new task. '400': content: application/json: @@ -15045,13 +14472,9 @@ paths: summary: The request body can't contain both flux and scriptID value: code: invalid - message: >- - failed to decode request: can not provide both scriptID - and flux + message: 'failed to decode request: can not provide both scriptID and flux' missingFluxError: - summary: >- - The request body requires either a flux parameter or - scriptID parameter + summary: The request body requires either a flux parameter or scriptID parameter value: code: invalid message: 'failed to decode request: flux required' @@ -15063,8 +14486,8 @@ paths: #### InfluxDB Cloud - - Returns this error if the task doesn't contain one of _`flux`_ or _`scriptID`_. - - Returns this error if the task contains _`flux`_ _and_ _`scriptID`_. 
+ - Returns this error if the task doesn't contain one of _`flux`_ or _`scriptID`_. + - Returns this error if the task contains _`flux`_ _and_ _`scriptID`_. '401': $ref: '#/components/responses/AuthorizationError' '500': @@ -15119,23 +14542,17 @@ paths: EOF /api/v2/tasks/{taskID}: delete: - description: > - Deletes a task and associated records. + description: | + Deletes a [task](/influxdb/cloud/reference/glossary/#task) and associated records. + Use this endpoint to delete a task and all associated records (task runs, logs, and labels). + Once the task is deleted, InfluxDB cancels all scheduled runs of the task. - Use this endpoint to delete a task and all associated records (task - runs, logs, and labels). - - Once the task is deleted, InfluxDB cancels all scheduled runs of the - task. - - - If you want to disable a task instead of delete it, [update the task - status to `inactive`](#operation/PatchTasksID). + If you want to disable a task instead of delete it, [update the task status to `inactive`](#operation/PatchTasksID). operationId: DeleteTasksID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the task to delete. + - description: A [task](/influxdb/cloud/reference/glossary/#task) ID. Specifies the task to delete. in: path name: taskID required: true @@ -15143,7 +14560,7 @@ paths: type: string responses: '204': - description: Success. The task and runs are deleted. Scheduled runs are canceled. + description: Success. The task and task runs are deleted. Scheduled runs are canceled. '400': $ref: '#/components/responses/BadRequestError' '401': @@ -15163,7 +14580,9 @@ paths: operationId: GetTasksID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the task to retrieve. + - description: | + A [task](/influxdb/cloud/reference/glossary/#task) ID. + Specifies the task to retrieve. in: path name: taskID required: true @@ -15191,76 +14610,67 @@ paths: - Data I/O endpoints - Tasks patch: - description: > - Updates a task and then cancels all scheduled runs of the task. + description: | + Updates a [task](/influxdb/cloud/reference/glossary/#task), + and then cancels all scheduled runs of the task. + Use this endpoint to set, modify, or clear task properties--for example: `cron`, `name`, `flux`, `status`. + Once InfluxDB applies the update, it cancels all previously scheduled runs of the task. - Use this endpoint to set, modify, and clear task properties (for - example: `cron`, `name`, `flux`, `status`). - - Once InfluxDB applies the update, it cancels all previously scheduled - runs of the task. - - - To update a task, pass an object that contains the updated key-value - pairs. - + To update a task, pass an object that contains the updated key-value pairs. To activate or inactivate a task, set the `status` property. - - _`"status": "inactive"`_ cancels scheduled runs and prevents manual runs - of the task. - + _`"status": "inactive"`_ cancels scheduled runs and prevents manual runs of the task. #### InfluxDB Cloud - - - You can use either `flux` or `scriptID` to provide the task script. + - Use either `flux` or `scriptID` to provide the task script. 
- `flux`: a string of "raw" Flux that contains task options and the script--for example:

-           ```json
-           {
-             "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\
-           from(bucket: \"telegraf\")
-             |> range(start: -1h)
-             |> filter(fn: (r) => (r._measurement == \"cpu\"))
-             |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\"))
-             |> filter(fn: (r) => (r.cpu == \"cpu-total\"))
-             |> aggregateWindow(every: 1h, fn: max)
-             |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")",
-             "status": "active",
-             "description": "This task downsamples CPU data every hour"
-           }
-           ```
+          ```json
+          {
+            "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\
+          from(bucket: \"telegraf\")
+            |> range(start: -1h)
+            |> filter(fn: (r) => (r._measurement == \"cpu\"))
+            |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\"))
+            |> filter(fn: (r) => (r.cpu == \"cpu-total\"))
+            |> aggregateWindow(every: 1h, fn: max)
+            |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")",
+            "status": "active",
+            "description": "This task downsamples CPU data every hour"
+          }
+          ```

        - `scriptID`: the ID of an [invokable script](#tag/Invokable-Scripts) for the task to run.
          To pass task options when using `scriptID`, pass the options as properties in the request body--for example:

-           ```json
-           {
-             "name": "CPU Total 1 Hour New",
-             "description": "This task downsamples CPU data every hour",
-             "every": "1h",
-             "scriptID": "SCRIPT_ID",
-             "scriptParameters":
-               {
-                 "rangeStart": "-1h",
-                 "bucket": "telegraf",
-                 "filterField": "cpu-total"
-               }
-           }
-           ```
+          ```json
+          {
+            "name": "CPU Total 1 Hour New",
+            "description": "This task downsamples CPU data every hour",
+            "every": "1h",
+            "scriptID": "SCRIPT_ID",
+            "scriptParameters":
+              {
+                "rangeStart": "-1h",
+                "bucket": "telegraf",
+                "filterField": "cpu-total"
+              }
+          }
+          ```

        #### Limitations:
-
        - You can't use `flux` and `scriptID` for the same task.
      operationId: PatchTasksID
      parameters:
        - $ref: '#/components/parameters/TraceSpan'
-       - description: The ID of the task to update.
+       - description: |
+           A [task](/influxdb/cloud/reference/glossary/#task) ID.
+           Specifies the task to update.
          in: path
          name: taskID
          required: true
@@ -15271,7 +14681,7 @@ paths:
          application/json:
            schema:
              $ref: '#/components/schemas/TaskUpdateRequest'
-       description: An object that contains updated task properties to apply.
+       description: A task update to apply.
        required: true
      responses:
        '200':
@@ -15314,9 +14724,7 @@ paths:
            application/json:
              schema:
                $ref: '#/components/schemas/LabelsResponse'
-         description: >-
-           Success. The response body contains a list of all labels for the
-           task.
+         description: Success. The response body contains a list of all labels for the task.
        '400':
          $ref: '#/components/responses/BadRequestError'
        '401':
@@ -15331,12 +14739,10 @@ paths:
      tags:
        - Tasks
    post:
-     description: >
+     description: |
        Adds a label to a task.
-
-       Use this endpoint to add a label that you can use to filter tasks in the
-       InfluxDB UI.
+       Use this endpoint to add a label that you can use to filter tasks in the InfluxDB UI.
      operationId: PostTasksIDLabels
      parameters:
        - $ref: '#/components/parameters/TraceSpan'
@@ -15359,9 +14765,7 @@ paths:
            application/json:
              schema:
                $ref: '#/components/schemas/LabelResponse'
-         description: >-
-           Success. The response body contains a list of all labels for the
-           task.
+         description: Success. The response body contains a list of all labels for the task.
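For reference, a minimal sketch of calling the add-label endpoint with cURL (not part of the spec; assumes a local instance at `http://localhost:8086` and placeholder `TASK_ID`, `LABEL_ID`, and `INFLUX_API_TOKEN` values):

```sh
# Add an existing label (by ID) to a task so the label can be used
# to filter tasks in the InfluxDB UI.
curl --request POST "http://localhost:8086/api/v2/tasks/TASK_ID/labels" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"labelID": "LABEL_ID"}'
```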
'400': $ref: '#/components/responses/BadRequestError' '401': @@ -15412,20 +14816,13 @@ paths: - Tasks /api/v2/tasks/{taskID}/logs: get: - description: > - Retrieves a list of all logs for a - [task](/influxdb/cloud/reference/glossary/#task). - - - When an InfluxDB task runs, a “run” record is created in the task’s - history. - - Logs associated with each run provide relevant log messages, timestamps, - and the exit status of the run attempt. + description: | + Retrieves a list of all logs for a [task](/influxdb/cloud/reference/glossary/#task). + When an InfluxDB task runs, a “run” record is created in the task’s history. + Logs associated with each run provide relevant log messages, timestamps, and the exit status of the run attempt. Use this endpoint to retrieve only the log events for a task, - without additional task metadata. operationId: GetTasksIDLogs parameters: @@ -15445,32 +14842,20 @@ paths: summary: Events for a failed task run. value: events: - - message: >- - Started task from script: "option task = {name: \"test - task\", every: 3d, offset: 0s}" + - message: 'Started task from script: "option task = {name: \"test task\", every: 3d, offset: 0s}"' runID: 09a946fc3167d000 time: '2022-07-13T07:06:54.198167Z' - message: Completed(failed) runID: 09a946fc3167d000 time: '2022-07-13T07:07:13.104037Z' - - message: >- - error exhausting result iterator: error in query - specification while starting program: this Flux script - returns no streaming data. Consider adding a "yield" - or invoking streaming functions directly, without - performing an assignment + - message: 'error exhausting result iterator: error in query specification while starting program: this Flux script returns no streaming data. Consider adding a "yield" or invoking streaming functions directly, without performing an assignment' runID: 09a946fc3167d000 time: '2022-07-13T08:24:37.115323Z' taskSuccess: summary: Events for a successful task run. value: events: - - message: >- - Started task from script: "option task = {name: - \"task1\", every: 30m} from(bucket: \"iot_center\") |> - range(start: -90d) |> filter(fn: (r) => r._measurement - == \"environment\") |> aggregateWindow(every: 1h, fn: - mean)" + - message: 'Started task from script: "option task = {name: \"task1\", every: 30m} from(bucket: \"iot_center\") |> range(start: -90d) |> filter(fn: (r) => r._measurement == \"environment\") |> aggregateWindow(every: 1h, fn: mean)"' runID: 09b070dadaa7d000 time: '2022-07-18T14:46:07.101231Z' - message: Completed(success) @@ -15478,14 +14863,10 @@ paths: time: '2022-07-18T14:46:07.242859Z' schema: $ref: '#/components/schemas/Logs' - description: > - Success. The response body contains an `events` list with logs for - the task. - + description: | + Success. The response body contains an `events` list with logs for the task. Each log event `message` contains detail about the event. - - If a task run fails, InfluxDB logs an event with the reason for the - failure. + If a task run fails, InfluxDB logs an event with the reason for the failure. '400': $ref: '#/components/responses/BadRequestError' '401': @@ -15502,11 +14883,9 @@ paths: /api/v2/tasks/{taskID}/members: get: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. 
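Because task `member` and `owner` roles are deprecated, a hedged sketch of the recommended replacement--listing a user's permissions through the authorizations API (placeholder `USERNAME` and `INFLUX_API_TOKEN` values assumed):

```sh
# List authorizations (API tokens and their permissions) scoped to a specific user,
# instead of inspecting task member or owner roles.
curl --request GET "http://localhost:8086/api/v2/authorizations?user=USERNAME" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```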
operationId: GetTasksIDMembers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15536,17 +14915,11 @@ paths: - Tasks post: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. - - - Adds a user to members of a task and returns the newly created member - with - - role and user detail. + Adds a user to members of a task and returns the member. operationId: PostTasksIDMembers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15582,11 +14955,9 @@ paths: /api/v2/tasks/{taskID}/members/{userID}: delete: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. operationId: DeleteTasksIDMembersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15617,12 +14988,9 @@ paths: /api/v2/tasks/{taskID}/owners: get: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. - + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. Retrieves all users that have owner permission for a task. operationId: GetTasksIDOwners @@ -15640,15 +15008,11 @@ paths: application/json: schema: $ref: '#/components/schemas/ResourceOwners' - description: > + description: | Success. + The response contains a list of `users` that have the `owner` role for the task. - The response contains a list of `users` that have the `owner` role - for the task. - - - If the task has no owners, the response contains an empty `users` - array. + If the task has no owners, the response contains an empty `users` array. '401': $ref: '#/components/responses/AuthorizationError' '422': @@ -15656,16 +15020,12 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -15680,18 +15040,13 @@ paths: - Tasks post: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. - + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. Assigns a task `owner` role to a user. - Use this endpoint to create a _resource owner_ for the task. - A _resource owner_ is a user with `role: owner` for a specific resource. operationId: PostTasksIDOwners parameters: @@ -15737,16 +15092,12 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. 
- + - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -15762,11 +15113,9 @@ paths: /api/v2/tasks/{taskID}/owners/{userID}: delete: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. operationId: DeleteTasksIDOwnersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15796,15 +15145,11 @@ paths: - Tasks /api/v2/tasks/{taskID}/runs: get: - description: > + description: | Retrieves a list of runs for a [task](/influxdb/cloud/process-data/). - - To limit which task runs are returned, pass query parameters in your - request. - - If no query parameters are passed, InfluxDB returns all task runs up to - the default `limit`. + To limit which task runs are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all task runs up to the default `limit`. operationId: GetTasksIDRuns parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15830,20 +15175,16 @@ paths: maximum: 500 minimum: 1 type: integer - - description: > - A timestamp ([RFC3339 date/time - format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). - + - description: | + A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). Only returns runs scheduled after this time. in: query name: afterTime schema: format: date-time type: string - - description: > - A timestamp ([RFC3339 date/time - format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). - + - description: | + A timestamp ([RFC3339 date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp)). Only returns runs scheduled before this time. in: query name: beforeTime @@ -15867,22 +15208,15 @@ paths: tags: - Tasks post: - description: > + description: | Schedules a task run to start immediately, ignoring scheduled runs. - Use this endpoint to manually start a task run. - Scheduled runs will continue to run as scheduled. - This may result in concurrently running tasks. - To _retry_ a previous run (and avoid creating a new run), - - use the [`POST - /api/v2/tasks/{taskID}/runs/{runID}/retry`](#operation/PostTasksIDRunsIDRetry) - endpoint. + use the [`POST /api/v2/tasks/{taskID}/runs/{runID}/retry` endpoint](#operation/PostTasksIDRunsIDRetry). operationId: PostTasksIDRuns parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15976,10 +15310,8 @@ paths: tags: - Tasks get: - description: > - Retrieves a specific run for a - [task](/influxdb/cloud/reference/glossary/#task). - + description: | + Retrieves a specific run for a [task](/influxdb/cloud/reference/glossary/#task). Use this endpoint to retrieve detail and logs for a specific task run. 
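A minimal sketch of retrieving a single task run with cURL (not part of the spec; assumes a local instance and placeholder `TASK_ID`, `RUN_ID`, and `INFLUX_API_TOKEN` values):

```sh
# Retrieve detail and log events for one run of a task.
curl --request GET "http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```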
operationId: GetTasksIDRunsID @@ -16008,19 +15340,12 @@ paths: finishedAt: '2022-07-18T14:46:07.308254Z' id: 09b070dadaa7d000 links: - logs: >- - /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/logs - retry: >- - /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/retry + logs: /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/logs + retry: /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/retry self: /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000 task: /api/v2/tasks/0996e56b2f378000 log: - - message: >- - Started task from script: "option task = {name: - \"task1\", every: 30m} from(bucket: \"iot_center\") |> - range(start: -90d) |> filter(fn: (r) => r._measurement - == \"environment\") |> - aggregateWindow(every: 1h, fn: mean)" + - message: 'Started task from script: "option task = {name: \"task1\", every: 30m} from(bucket: \"iot_center\") |> range(start: -90d) |> filter(fn: (r) => r._measurement == \"environment\") |> aggregateWindow(every: 1h, fn: mean)"' runID: 09b070dadaa7d000 time: '2022-07-18T14:46:07.101231Z' - message: Completed(success) @@ -16049,15 +15374,11 @@ paths: - Tasks /api/v2/tasks/{taskID}/runs/{runID}/logs: get: - description: > + description: | Retrieves all logs for a task run. + A log is a list of run events with `runID`, `time`, and `message` properties. - A log is a list of run events with `runID`, `time`, and `message` - properties. - - - Use this endpoint to help analyze task performance and troubleshoot - failed task runs. + Use this endpoint to help analyze task performance and troubleshoot failed task runs. operationId: GetTasksIDRunsIDLogs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -16082,32 +15403,20 @@ paths: summary: Events for a failed task. value: events: - - message: >- - Started task from script: "option task = {name: \"test - task\", every: 3d, offset: 0s}" + - message: 'Started task from script: "option task = {name: \"test task\", every: 3d, offset: 0s}"' runID: 09a946fc3167d000 time: '2022-07-13T07:06:54.198167Z' - message: Completed(failed) runID: 09a946fc3167d000 time: '2022-07-13T07:07:13.104037Z' - - message: >- - error exhausting result iterator: error in query - specification while starting program: this Flux script - returns no streaming data. Consider adding a "yield" - or invoking streaming functions directly, without - performing an assignment + - message: 'error exhausting result iterator: error in query specification while starting program: this Flux script returns no streaming data. Consider adding a "yield" or invoking streaming functions directly, without performing an assignment' runID: 09a946fc3167d000 time: '2022-07-13T08:24:37.115323Z' taskSuccess: summary: Events for a successful task run. value: events: - - message: >- - Started task from script: "option task = {name: - \"task1\", every: 30m} from(bucket: \"iot_center\") |> - range(start: -90d) |> filter(fn: (r) => r._measurement - == \"environment\") |> aggregateWindow(every: 1h, fn: - mean)" + - message: 'Started task from script: "option task = {name: \"task1\", every: 30m} from(bucket: \"iot_center\") |> range(start: -90d) |> filter(fn: (r) => r._measurement == \"environment\") |> aggregateWindow(every: 1h, fn: mean)"' runID: 09b070dadaa7d000 time: '2022-07-18T14:46:07.101231Z' - message: Completed(success) @@ -16115,14 +15424,10 @@ paths: time: '2022-07-18T14:46:07.242859Z' schema: $ref: '#/components/schemas/Logs' - description: > - Success. The response body contains an `events` list with logs for - the task run. 
- + description: | + Success. The response body contains an `events` list with logs for the task run. Each log event `message` contains detail about the event. - - If a run fails, InfluxDB logs an event with the reason for the - failure. + If a run fails, InfluxDB logs an event with the reason for the failure. '400': $ref: '#/components/responses/BadRequestError' '401': @@ -16138,28 +15443,34 @@ paths: - Tasks /api/v2/tasks/{taskID}/runs/{runID}/retry: post: - description: > - Queues a task run to retry and returns the newly scheduled run. - - - To manually start a _new_ task run, use the [`POST - /api/v2/tasks/{taskID}/runs`](#operation/PostTasksIDRuns) endpoint. + description: | + Queues a [task](/influxdb/cloud/reference/glossary/#task) run to + retry and returns the scheduled run. + To manually start a _new_ task run, use the + [`POST /api/v2/tasks/{taskID}/runs` endpoint](#operation/PostTasksIDRuns). #### Limitations - - The task must be _active_ (`status: "active"`). operationId: PostTasksIDRunsIDRetry parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the task to retry. + - description: | + A [task](/influxdb/cloud/reference/glossary/#task) ID. + Specifies the task to retry. in: path name: taskID required: true schema: type: string - - description: The ID of the task run to retry. + - description: | + A [task](/influxdb/cloud/reference/glossary/#task) run ID. + Specifies the task run to retry. + + To find a task run ID, use the + [`GET /api/v2/tasks/{taskID}/runs` endpoint](#operation/GetTasksIDRuns) + to list task runs. in: path name: runID required: true @@ -16180,10 +15491,8 @@ paths: value: id: 09d60ffe08738000 links: - logs: >- - /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/logs - retry: >- - /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/retry + logs: /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/logs + retry: /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/retry self: /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000 task: /api/v2/tasks/09a776832f381000 requestedAt: '2022-08-16T20:05:11.84145Z' @@ -16639,9 +15948,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ResourceOwner' - description: >- - Telegraf configuration owner was added. Returns a ResourceOwner that - references the User. + description: Telegraf configuration owner was added. Returns a ResourceOwner that references the User. default: content: application/json: @@ -16682,44 +15989,26 @@ paths: - Telegrafs /api/v2/templates/apply: post: - description: > + description: | Applies a template to - - create or update a [stack](/influxdb/cloud/influxdb-templates/stacks/) - of InfluxDB - + create or update a [stack](/influxdb/cloud/influxdb-templates/stacks/) of InfluxDB [resources](/influxdb/cloud/reference/cli/influx/export/all/#resources). - The response contains the diff of changes and the stack ID. - Use this endpoint to install an InfluxDB template to an organization. - Provide template URLs or template objects in your request. - To customize which template resources are installed, use the `actions` - parameter. - By default, when you apply a template, InfluxDB installs the template to - - create and update stack resources and then generates a diff of the - changes. - + create and update stack resources and then generates a diff of the changes. If you pass `dryRun: true` in the request body, InfluxDB validates the - template and generates the resource diff, but doesn’t make any - changes to your instance. 
- #### Custom values for templates - - - Some templates may contain [environment - references](/influxdb/cloud/influxdb-templates/create/#include-user-definable-resource-names) - for custom metadata. + - Some templates may contain [environment references](/influxdb/cloud/influxdb-templates/create/#include-user-definable-resource-names) for custom metadata. To provide custom values for environment references, pass the _`envRefs`_ property in the request body. For more information and examples, see how to @@ -16734,21 +16023,16 @@ paths: #### Required permissions - - `write` permissions for resource types in the template. - #### Rate limits (with InfluxDB Cloud) - - Adjustable service quotas apply. For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - [Use templates](/influxdb/cloud/influxdb-templates/use/) - - [Stacks](/influxdb/cloud/influxdb-templates/stacks/) operationId: ApplyTemplate requestBody: @@ -16848,16 +16132,11 @@ paths: application/json: schema: $ref: '#/components/schemas/TemplateSummary' - description: > + description: | Success. - The template applied successfully. - The response body contains the stack ID, a diff, and a summary. - - The diff compares the initial state to the state after the template - installation. - + The diff compares the initial state to the state after the template installation. The summary contains newly created resources. '422': content: @@ -17027,9 +16306,7 @@ paths: application/x-yaml: schema: $ref: '#/components/schemas/Template' - description: >- - The template was created successfully. Returns the newly created - template. + description: The template was created successfully. Returns the newly created template. default: content: application/json: @@ -17042,17 +16319,82 @@ paths: /api/v2/users: get: description: | - Retrieves a list of users. + Retrieves a list of [users](/influxdb/cloud/reference/glossary/#user). + + To limit which users are returned, pass query parameters in your request. + + #### InfluxDB Cloud + + - InfluxDB Cloud doesn't allow listing all users through the API. + Use the InfluxDB Cloud user interface (UI) to manage account information. + + #### Required permissions for InfluxDB Cloud + + | Action | Permission required | Restriction | + |:-------|:--------------------|:------------| + | List all users | Operator token | InfluxData internal use only | + | List a specific user | `read-users` or `read-user USER_ID` | + + Replace the following: + + - `USER_ID`: ID of the user that you want to retrieve. + + #### Related guides + + - [Manage users](/influxdb/cloud/organizations/users/) operationId: GetUsers parameters: - $ref: '#/components/parameters/TraceSpan' + - description: | + A user name. + Only lists the specified [user](/influxdb/cloud/reference/glossary/#user). + in: query + name: name + schema: + type: string + - description: | + A user id. + Only lists the specified [user](/influxdb/cloud/reference/glossary/#user). + in: query + name: id + schema: + type: string responses: '200': content: application/json: schema: $ref: '#/components/schemas/Users' - description: Success. The response contains a list of `users`. + description: | + Success. The response contains a list of `users`. + + #### InfluxDB Cloud + + - Returns an empty `users` list if you don't pass _`id`_ or _`name`_ parameters and don't use an + _operator token_. + Only InfluxData can access InfluxDB Cloud operator tokens. 
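A minimal sketch of looking up a single user with the `name` query parameter described above (assumes a local InfluxDB OSS instance and placeholder values; in InfluxDB Cloud, listing all users requires an operator token):

```sh
# Return only the user with the specified name.
curl --request GET "http://localhost:8086/api/v2/users?name=USERNAME" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```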
+ '401': + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: | + Unauthorized. + '422': + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: | + Unprocessable entity. + + The error may indicate one of the following problems: + + - The request body isn't valid--the request is well-formed, + but InfluxDB can't process it due to semantic errors. + - You passed a parameter combination that InfluxDB doesn't support. + '500': + $ref: '#/components/responses/InternalServerError' default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -17062,7 +16404,25 @@ paths: - Users post: description: | - Creates a user and returns the newly created user. + (InfluxData internal use only) + + Creates and returns a [user](/influxdb/cloud/reference/glossary/#user) + that can access InfluxDB. + + #### InfluxDB Cloud + + - InfluxDB Cloud doesn't allow managing users through the API. + Use the InfluxDB Cloud user interface (UI) to manage account information. + + #### Required permissions for InfluxDB Cloud + + | Action | Permission required | Restriction | + |:-------|:--------------------|:------------| + | Create user | Operator token | InfluxData internal use only | + + #### Related guides + + - [Manage users](/influxdb/cloud/organizations/users/) operationId: PostUsers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -17081,7 +16441,7 @@ paths: $ref: '#/components/schemas/UserResponse' description: | Success. - The response contains the newly created user. + The response body contains the user. '401': content: application/json: @@ -17089,21 +16449,23 @@ paths: $ref: '#/components/schemas/Error' description: | Unauthorized. + + #### InfluxDB Cloud + + - Returns this error if the request doesn't use an _operator token_. + Only InfluxData can access InfluxDB Cloud operator tokens. '422': content: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, + but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -17115,10 +16477,34 @@ paths: - Users /api/v2/users/{userID}: delete: + description: | + (InfluxData internal use only) + + Deletes a [user](/influxdb/cloud/reference/glossary/#user). + + For security purposes, once an InfluxDB user account is deleted from an + organization, the user (and their token) cannot be reactivated. + + #### InfluxDB Cloud + + - Doesn't allow managing users through the API. + Use the InfluxDB Cloud user interface (UI) to manage account information. + + #### Required permissions + + | Action | Permission required | Restriction | + |:-------|:--------------------|:------------| + | Delete user | Operator token | InfluxData internal use only | + + #### Related guides + + - [Manage users](/influxdb/cloud/organizations/users/) operationId: DeleteUsersID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the user to delete. + - description: | + A user ID. + Deletes the specified [user](/influxdb/cloud/reference/glossary/#user). 
in: path name: userID required: true @@ -17126,7 +16512,7 @@ paths: type: string responses: '204': - description: User deleted + description: Success. The user is deleted. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -17134,10 +16520,18 @@ paths: tags: - Users get: + description: | + Retrieves a [user](/influxdb/cloud/reference/glossary/#user). + + #### Related guides + + - [Manage users](/influxdb/cloud/organizations/users/) operationId: GetUsersID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The user ID. + - description: | + A user ID. + Retrieves the specified [user](/influxdb/cloud/reference/glossary/#user). in: path name: userID required: true @@ -17149,7 +16543,7 @@ paths: application/json: schema: $ref: '#/components/schemas/UserResponse' - description: User details + description: Success. The response body contains the user. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -17157,10 +16551,31 @@ paths: tags: - Users patch: + description: | + (InfluxData internal use only) + + Updates a [user](/influxdb/cloud/reference/glossary/#user) and returns the user. + + #### InfluxDB Cloud + + - Doesn't allow managing users through the API. + Use the InfluxDB Cloud user interface (UI) to manage account information. + + #### Required permissions for InfluxDB Cloud + + | Action | Permission required | Restriction | + |:-------|:--------------------|:------------| + | Update user | Operator token | InfluxData internal use only | + + #### Related guides + + - [Manage users](/influxdb/cloud/organizations/users/) operationId: PatchUsersID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the user to update. + - description: | + A user ID. + Updates the specified [user](/influxdb/cloud/reference/glossary/#user). in: path name: userID required: true @@ -17171,7 +16586,7 @@ paths: application/json: schema: $ref: '#/components/schemas/User' - description: User update to apply + description: The user update to apply. required: true responses: '200': @@ -17179,7 +16594,7 @@ paths: application/json: schema: $ref: '#/components/schemas/UserResponse' - description: User updated + description: Success. The response body contains the updated user. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -17189,14 +16604,25 @@ paths: /api/v2/users/{userID}/password: post: description: | + Updates a user password. + + Use this endpoint to let a user authenticate with + [Basic authentication credentials](#section/Authentication/BasicAuthentication) + and set a new password. + #### InfluxDB Cloud - InfluxDB Cloud doesn't support changing user passwords through the API. - Use the InfluxDB Cloud user interface to update your password. + - Doesn't allow you to manage user passwords through the API. + Use the InfluxDB Cloud user interface (UI) to update a password. + + #### Related guides + + - [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/) + - [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/) operationId: PostUsersIDPassword parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The user ID. + - description: The ID of the user to set the password for. 
in: path name: userID required: true @@ -17207,29 +16633,52 @@ paths: application/json: schema: $ref: '#/components/schemas/PasswordResetBody' - description: New password + description: The new password to set for the user. required: true responses: '204': - description: Password successfully updated + description: Success. The password is updated. '400': - description: > - Bad request. - - InfluxDB Cloud doesn't support changing passwords through the API - and always responds with this status. - default: content: application/json: + examples: + updatePasswordNotAllowed: + summary: Cloud API can't update passwords + value: + code: invalid + message: passwords cannot be changed through the InfluxDB Cloud API schema: $ref: '#/components/schemas/Error' - description: Unsuccessful authentication + description: | + Bad request. + + #### InfluxDB Cloud + + - Doesn't allow you to manage passwords through the API; always responds with this status. + + #### InfluxDB OSS + + - Doesn't understand a value passed in the request. + default: + $ref: '#/components/responses/GeneralServerError' + description: Unexpected error security: - BasicAuthentication: [] summary: Update a password tags: - Security and access endpoints - Users + x-codeSamples: + - label: 'cURL: use Basic auth to update the user password' + lang: Shell + source: | + curl --request POST \ + "http://localhost:8086/api/v2/users/USER_ID/password" \ + --header 'Content-type: application/json' \ + --user "USERNAME:PASSWORD" \ + --data-binary @- << EOF + {"password": ""} + EOF /api/v2/variables: get: operationId: GetVariables @@ -17488,18 +16937,13 @@ paths: - Variables /api/v2/write: post: - description: > + description: | Writes data to a bucket. - - Use this endpoint to send data in [line - protocol](/influxdb/cloud/reference/syntax/line-protocol/) format to - InfluxDB. - + Use this endpoint to send data in [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) format to InfluxDB. #### InfluxDB Cloud - - Takes the following steps when you send a write request: 1. Validates the request and queues the write. @@ -17515,40 +16959,27 @@ paths: #### InfluxDB OSS - - Validates the request, handles the write synchronously, and then responds with success or failure. - - If all points were written successfully, responds with HTTP `204` - status code; + - If all points were written successfully, responds with HTTP `204` status code; otherwise, returns the first line that failed. #### Required permissions - - `write-buckets` or `write-bucket BUCKET_ID`. `BUCKET_ID` is the ID of the destination bucket. #### Rate limits (with InfluxDB Cloud) - `write` rate limits apply. - - For more information, see [limits and adjustable - quotas](/influxdb/cloud/account-management/limits/). - + For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - - [Write data with the InfluxDB - API](/influxdb/cloud/write-data/developer-tools/api). - - - [Optimize writes to - InfluxDB](/influxdb/cloud/write-data/best-practices/optimize-writes/). 
- - - [Troubleshoot issues writing - data](/influxdb/cloud/write-data/troubleshoot/) + - [Write data with the InfluxDB API](/influxdb/cloud/write-data/developer-tools/api) + - [Optimize writes to InfluxDB](/influxdb/cloud/write-data/best-practices/optimize-writes/) + - [Troubleshoot issues writing data](/influxdb/cloud/write-data/troubleshoot/) operationId: PostWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -17559,27 +16990,22 @@ paths: name: Content-Encoding schema: default: identity - description: > + description: | Content coding. - - Use `gzip` for compressed data or `identity` for unmodified, - uncompressed data. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity type: string - - description: > + - description: | The format of the data in the request body. - - To send a line protocol payload, pass `Content-Type: text/plain; - charset=utf-8`. + To send a line protocol payload, pass `Content-Type: text/plain; charset=utf-8`. in: header name: Content-Type schema: default: text/plain; charset=utf-8 - description: > - `text/plain` is the content type for line protocol. `UTF-8` is the - default character set. + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. enum: - text/plain - text/plain; charset=utf-8 @@ -17608,7 +17034,8 @@ paths: - Returns only `application/json` for format and limit errors. #### Related guides - - [Troubleshoot issues writing data](/influxdb/cloud/write-data/troubleshoot/). + + - [Troubleshoot issues writing data](/influxdb/cloud/write-data/troubleshoot/) in: header name: Accept schema: @@ -17617,28 +17044,19 @@ paths: enum: - application/json type: string - - description: > + - description: | The destination organization for writes. - InfluxDB writes all points in the batch to this organization. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Writes to the bucket in the organization associated with the - authorization (API token). - + - Writes to the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. - - InfluxDB writes all points in the batch to this organization. in: query name: org @@ -17646,27 +17064,19 @@ paths: schema: description: The organization name or ID. type: string - - description: > + - description: | The ID of the destination organization for writes. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Writes to the bucket in the organization associated with the - authorization (API token). - + - Writes to the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. - - InfluxDB writes all points in the batch to this organization. 
in: query name: orgID @@ -17691,21 +17101,15 @@ paths: text/plain: examples: plain-utf8: - value: > - airSensors,sensor_id=TLM0201 - temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 - 1630424257000000000 - - airSensors,sensor_id=TLM0202 - temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 - 1630424257000000000 + value: | + airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000 + airSensors,sensor_id=TLM0202 temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 1630424257000000000 schema: format: byte type: string - description: > + description: | Data in line protocol format. - To send compressed data, do the following: 1. Use [GZIP](https://www.gzip.org/) to compress the line protocol data. @@ -17714,52 +17118,34 @@ paths: #### Related guides - - - [Best practices for optimizing - writes](/influxdb/cloud/write-data/best-practices/optimize-writes/). + - [Best practices for optimizing writes](/influxdb/cloud/write-data/best-practices/optimize-writes/) required: true responses: '204': - description: > + description: | Success. - #### InfluxDB Cloud - - Validated and queued the request. - - - Handles the write asynchronously - the write might not have - completed yet. - + - Handles the write asynchronously - the write might not have completed yet. #### InfluxDB OSS - - Successfully wrote all points in the batch. - #### Related guides - - - [How to check for write - errors](/influxdb/cloud/write-data/troubleshoot/). + - [How to check for write errors](/influxdb/cloud/write-data/troubleshoot/) '400': content: application/json: examples: measurementSchemaFieldTypeConflict: - summary: >- - (Cloud) field type conflict thrown by an explicit bucket - schema + summary: (Cloud) field type conflict thrown by an explicit bucket schema value: code: invalid - message: >- - partial write error (2 written): unable to parse - 'air_sensor,service=S1,sensor=L1 - temperature="90.5",humidity=70.0 1632850122': schema: - field type for field "temperature" not permitted by - schema; got String but expected Float + message: 'partial write error (2 written): unable to parse ''air_sensor,service=S1,sensor=L1 temperature="90.5",humidity=70.0 1632850122'': schema: field type for field "temperature" not permitted by schema; got String but expected Float' orgNotFound: summary: (OSS) organization not found value: @@ -17767,34 +17153,21 @@ paths: message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/LineProtocolError' - description: > + description: | Bad request. The response body contains detail about the error. - - InfluxDB returns this error if the line protocol data in the request - is malformed. - - The response body contains the first malformed line in the data, and - indicates what was expected. - - For partial writes, the number of points written and the number of - points rejected are also included. - - For more information, check the `rejected_points` measurement in - your `_monitoring` bucket. - + InfluxDB returns this error if the line protocol data in the request is malformed. + The response body contains the first malformed line in the data, and indicates what was expected. + For partial writes, the number of points written and the number of points rejected are also included. + For more information, check the `rejected_points` measurement in your `_monitoring` bucket. 
#### InfluxDB Cloud - - Returns this error for bucket schema conflicts. - #### InfluxDB OSS - - - Returns this error if `org` or `orgID` doesn't match an - organization. + - Returns this error if `org` or `orgID` doesn't match an organization. '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -17805,9 +17178,8 @@ paths: examples: dataExceedsSizeLimitOSS: summary: InfluxDB OSS response - value: > - {"code":"request too large","message":"unable to read data: - points batch is too large"} + value: | + {"code":"request too large","message":"unable to read data: points batch is too large"} schema: $ref: '#/components/schemas/LineProtocolLengthError' text/html: @@ -17857,28 +17229,22 @@ paths: - Doesn't return this error. headers: Retry-After: - description: >- - Non-negative decimal integer indicating seconds to wait before - retrying the request. + description: Non-negative decimal integer indicating seconds to wait before retrying the request. schema: format: int32 type: integer '500': $ref: '#/components/responses/InternalServerError' '503': - description: > + description: | Service unavailable. - - Returns this error if the server is temporarily unavailable to accept writes. - - Returns a `Retry-After` header that describes when to try the - write again. + - Returns a `Retry-After` header that describes when to try the write again. headers: Retry-After: - description: >- - Non-negative decimal integer indicating seconds to wait before - retrying the request. + description: Non-negative decimal integer indicating seconds to wait before retrying the request. schema: format: int32 type: integer @@ -17895,42 +17261,42 @@ paths: - $ref: '#/components/parameters/TraceSpan' - description: | A user ID. - Only returns legacy authorizations scoped to this user. + Only returns legacy authorizations scoped to the specified [user](/influxdb/cloud/reference/glossary/#user). in: query name: userID schema: type: string - description: | A user name. - Only returns legacy authorizations scoped to this user. + Only returns legacy authorizations scoped to the specified [user](/influxdb/cloud/reference/glossary/#user). in: query name: user schema: type: string - description: | An organization ID. - Only returns legacy authorizations that belong to this organization. + Only returns legacy authorizations that belong to the specified [organization](/influxdb/cloud/reference/glossary/#organization). in: query name: orgID schema: type: string - description: | An organization name. - Only returns legacy authorizations that belong to this organization. + Only returns legacy authorizations that belong to the specified [organization](/influxdb/cloud/reference/glossary/#organization). in: query name: org schema: type: string - description: | An authorization name token. - Only returns legacy authorizations with this token (name). + Only returns legacy authorizations with the specified name. in: query name: token schema: type: string - description: | An authorization ID. - Only returns the legacy authorization with this ID. + Returns the specified legacy authorization. in: query name: authID schema: @@ -17949,9 +17315,7 @@ paths: $ref: '#/components/schemas/Links' readOnly: true type: object - description: >- - Success. The response body contains a list of legacy - `authorizations`. + description: Success. The response body contains a list of legacy `authorizations`. 
default: $ref: '#/components/responses/ServerError' description: Unexpected error @@ -17959,20 +17323,14 @@ paths: tags: - Legacy Authorizations post: - description: > - Creates a legacy authorization and returns the newly created - authorization. - + description: | + Creates a legacy authorization and returns the legacy authorization. #### Required permissions + - `write-users USER_ID` if you pass the `userID` property in the request body. - - `write-users USER_ID` if you pass the `userID` property in the request - body. - - - `USER_ID` is the ID of the user that you want to scope the authorization - to. + `USER_ID` is the ID of the user that you want to scope the authorization to. operationId: PostLegacyAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' @@ -18007,17 +17365,14 @@ paths: schema: properties: code: - description: > - The HTTP status code description. Default is - `unauthorized`. + description: | + The HTTP status code description. Default is `unauthorized`. enum: - unauthorized readOnly: true type: string message: - description: >- - A human-readable message that may contain detail about the - error. + description: A human-readable message that may contain detail about the error. readOnly: true type: string description: | @@ -18150,30 +17505,22 @@ paths: name: Accept schema: default: application/json - description: > + description: | Media type that the client can understand. - - **Note**: With `application/csv`, query results include [**unix - timestamps**](/influxdb/cloud/reference/glossary/#unix-timestamp) - instead of [RFC3339 - timestamps](/influxdb/cloud/reference/glossary/#rfc3339-timestamp). + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/cloud/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/cloud/reference/glossary/#rfc3339-timestamp). enum: - application/json - application/csv - text/csv - application/x-msgpack type: string - - description: >- - The content encoding (usually a compression algorithm) that the - client can understand. + - description: The content encoding (usually a compression algorithm) that the client can understand. in: header name: Accept-Encoding schema: default: identity - description: >- - The content coding. Use `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -18194,49 +17541,33 @@ paths: name: p schema: type: string - - description: > + - description: | The database to query data from. - - This is mapped to an InfluxDB - [bucket](/influxdb/cloud/reference/glossary/#bucket). - - For more information, see [Database and retention policy - mapping](/influxdb/cloud/api/influxdb-1x/dbrp/). + This is mapped to an InfluxDB [bucket](/influxdb/cloud/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/cloud/api/influxdb-1x/dbrp/). in: query name: db required: true schema: type: string - - description: > + - description: | The retention policy to query data from. - - This is mapped to an InfluxDB - [bucket](/influxdb/cloud/reference/glossary/#bucket). - - For more information, see [Database and retention policy - mapping](/influxdb/cloud/api/influxdb-1x/dbrp/). + This is mapped to an InfluxDB [bucket](/influxdb/cloud/reference/glossary/#bucket). 
+ For more information, see [Database and retention policy mapping](/influxdb/cloud/api/influxdb-1x/dbrp/). in: query name: rp schema: type: string - - description: >- - The InfluxQL query to execute. To execute multiple queries, delimit - queries with a semicolon (`;`). + - description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`). in: query name: q required: true schema: type: string - - description: > + - description: | A unix timestamp precision. - - Formats timestamps as [unix (epoch) - timestamps](/influxdb/cloud/reference/glossary/#unix-timestamp) the - specified precision - - instead of [RFC3339 - timestamps](/influxdb/cloud/reference/glossary/#rfc3339-timestamp) - with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/cloud/reference/glossary/#unix-timestamp) the specified precision + instead of [RFC3339 timestamps](/influxdb/cloud/reference/glossary/#rfc3339-timestamp) with nanosecond precision. in: query name: epoch schema: @@ -18268,9 +17599,7 @@ paths: description: Query results headers: Content-Encoding: - description: >- - Lists encodings (usually compression algorithms) that have been - applied to the response payload. + description: Lists encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity description: | @@ -18299,9 +17628,7 @@ paths: - doesn't return this error. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: format: int32 type: integer @@ -18329,9 +17656,7 @@ paths: name: p schema: type: string - - description: >- - Bucket to write to. If none exists, InfluxDB creates a bucket with a - default 3-day retention policy. + - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. in: query name: db required: true @@ -18347,16 +17672,12 @@ paths: name: precision schema: type: string - - description: >- - When present, its value indicates to the database that compression - is applied to the line protocol body. + - description: When present, its value indicates to the database that compression is applied to the line protocol body. in: header name: Content-Encoding schema: default: identity - description: >- - Specifies that the line protocol in the body is encoded with gzip - or not encoded with identity. + description: Specifies that the line protocol in the body is encoded with gzip or not encoded with identity. enum: - gzip - identity @@ -18370,26 +17691,19 @@ paths: required: true responses: '204': - description: >- - Write data is correctly formatted and accepted for writing to the - bucket. + description: Write data is correctly formatted and accepted for writing to the bucket. '400': content: application/json: schema: $ref: '#/components/schemas/LineProtocolError' - description: >- - Line protocol poorly formed and no points were written. Response - can be used to determine the first malformed line in the body - line-protocol. All data in body was rejected and not written. + description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. 
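To see how the `db`, `rp`, `q`, and `epoch` parameters fit together, here is a minimal sketch of an InfluxQL query against the 1.x compatibility `/query` endpoint, assuming the Python `requests` library; the host and token are placeholders.

```python
import requests

# Placeholders -- substitute your own values.
INFLUX_URL = "https://cloud2.influxdata.com"
TOKEN = "YOUR_INFLUX_TOKEN"

resp = requests.get(
    f"{INFLUX_URL}/query",
    headers={
        "Authorization": f"Token {TOKEN}",
        # With application/csv, results use unix timestamps instead of RFC3339.
        "Accept": "application/csv",
    },
    params={
        "db": "mydb",        # mapped to an InfluxDB bucket via a DBRP mapping
        "rp": "autogen",     # optional retention policy
        "q": "SELECT * FROM cpu LIMIT 10",
        "epoch": "ms",       # unix timestamps with millisecond precision
    },
)
print(resp.status_code)
print(resp.text)
```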
'401': content: application/json: schema: $ref: '#/components/schemas/Error' - description: >- - Token doesn't have sufficient permissions to write to this - organization and bucket or the organization and bucket do not exist. + description: Token doesn't have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist. '403': content: application/json: @@ -18401,31 +17715,20 @@ paths: application/json: schema: $ref: '#/components/schemas/LineProtocolLengthError' - description: >- - Write has been rejected because the payload is too large. Error - message returns max size supported. All data in body was rejected - and not written. + description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. '429': - description: >- - Token is temporarily over quota. The Retry-After header describes - when to try the write again. + description: Token is temporarily over quota. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: format: int32 type: integer '503': - description: >- - Server is temporarily unavailable to accept writes. The Retry-After - header describes when to try the write again. + description: Server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: format: int32 type: integer @@ -18443,144 +17746,84 @@ security: servers: - url: / tags: - - description: > + - description: | Use one of the following schemes to authenticate to the InfluxDB API: - - [Token authentication](#section/Authentication/TokenAuthentication) - - [Basic authentication](#section/Authentication/BasicAuthentication) - - - [Querystring - authentication](#section/Authentication/QuerystringAuthentication) - + - [Querystring authentication](#section/Authentication/QuerystringAuthentication) name: Authentication x-traitTag: true - - description: > + - description: | Create and manage authorizations (API tokens). - An _authorization_ contains a list of `read` and `write` - - permissions for organization resources and provides an API token for - authentication. - - An authorization belongs to an organization and only contains permissions - for that organization. - + permissions for organization resources and provides an API token for authentication. + An authorization belongs to an organization and only contains permissions for that organization. In InfluxDB Cloud, an authorization with `read-authorizations` permission - can be used to view other authorizations. - - Optionally, when creating an authorization, you can scope it to a specific - user. - + Optionally, when creating an authorization, you can scope it to a specific user. #### Limitations - To follow best practices for secure API token generation and retrieval, - InfluxDB Cloud enforces access restrictions on API tokens. - - - InfluxDB Cloud only allows access to the API token value immediately - after the authorization is created. 
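Because `429` and `503` responses include a `Retry-After` header, write clients can back off and retry. A sketch, assuming the Python `requests` library and placeholder host, token, and database values:

```python
import time

import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder
TOKEN = "YOUR_INFLUX_TOKEN"                   # placeholder
LINE_PROTOCOL = "cpu,host=server01 usage_idle=90.5"

def write_with_retry(max_attempts: int = 3) -> None:
    for _ in range(max_attempts):
        resp = requests.post(
            f"{INFLUX_URL}/write",
            headers={"Authorization": f"Token {TOKEN}"},
            params={"db": "mydb", "precision": "s"},
            data=LINE_PROTOCOL,
        )
        if resp.status_code == 204:
            return  # data is correctly formatted and accepted for writing
        if resp.status_code in (429, 503):
            # Retry-After is a non-negative decimal integer of seconds.
            time.sleep(int(resp.headers.get("Retry-After", "1")))
            continue
        # 400, 401, 413, and other errors are not retryable.
        resp.raise_for_status()

write_with_retry()
```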
- - - You can’t change access (read/write) permissions for an API token after - it’s created. - + - InfluxDB Cloud only allows access to the API token value immediately after the authorization is created. + - You can’t change access (read/write) permissions for an API token after it’s created. - Tokens stop working when the user who created the token is deleted. - We recommend the following for managing your tokens: - - Create a generic user to create and manage tokens for writing data. - - Store your tokens in a secure password vault for future access. - #### User sessions with authorizations - If a user signs in with username and password, creating a _user session_, - - the session carries the permissions granted by all the user's - authorizations. - - To create a user session, use the [`POST - /api/v2/signin`](#operation/PostSignin) endpoint. - + the session carries the permissions granted by all the user's authorizations. + To create a user session, use the [`POST /api/v2/signin` endpoint](#operation/PostSignin). ### Related endpoints - - [Signin](#tag/Signin) - - [Signout](#tag/Signout) - ### Related guides - - [Authorize API requests](/influxdb/cloud/api-guide/api_intro/#authentication). - - [Manage API tokens](/influxdb/cloud/security/tokens/). - - [Assign a token to a specific user](/influxdb/cloud/security/tokens/create-token/). + - [Authorize API requests](/influxdb/cloud/api-guide/api_intro/#authentication) + - [Manage API tokens](/influxdb/cloud/security/tokens/) + - [Assign a token to a specific user](/influxdb/cloud/security/tokens/create-token/) name: Authorizations - name: Bucket Schemas - - description: > - Store your data in InfluxDB - [buckets](/influxdb/cloud/reference/glossary/#bucket). - + - description: | + Store your data in InfluxDB [buckets](/influxdb/cloud/reference/glossary/#bucket). A bucket is a named location where time series data is stored. All buckets - - have a [retention - period](/influxdb/cloud/reference/glossary/#retention-period), - + have a [retention period](/influxdb/cloud/reference/glossary/#retention-period), a duration of time that each data point persists. InfluxDB drops all - points with timestamps older than the bucket’s retention period. - A bucket belongs to an organization. - ### Related guides - - [Manage buckets](/influxdb/cloud/organizations/buckets/) + - [Manage buckets](/influxdb/cloud/organizations/buckets/) name: Buckets - name: Cells - name: Checks - - description: > - Many InfluxDB API endpoints require parameters to specify resources--for - example, - + - description: | + Many InfluxDB API endpoints require parameters to specify resources--for example, writing to a **bucket** in an **organization**. - ### Common query parameters - - | Query parameter | Value type | - Description | - - |:------------------------ |:--------------------- - |:-------------------------------------------| - - | `bucket` | string | The bucket name or ID - ([find your bucket](/influxdb/cloud/organizations/buckets/view-buckets/). - | - - | `bucketID` | string | The bucket ID ([find - your bucket](/influxdb/cloud/organizations/buckets/view-buckets/). | - - | `org` | string | The organization name - or ID ([find your organization](/influxdb/cloud/organizations/view-orgs/). - | - - | `orgID` | 16-byte string | The organization ID - ([find your organization](/influxdb/cloud/organizations/view-orgs/). 
| + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `bucket` | string | The bucket name or ID ([find your bucket](/influxdb/cloud/organizations/buckets/view-buckets/). | + | `bucketID` | string | The bucket ID ([find your bucket](/influxdb/cloud/organizations/buckets/view-buckets/). | + | `org` | string | The organization name or ID ([find your organization](/influxdb/cloud/organizations/view-orgs/). | + | `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb/cloud/organizations/view-orgs/). | name: Common parameters x-traitTag: true - name: Dashboards @@ -18589,66 +17832,43 @@ tags: - description: | Delete data from an InfluxDB bucket. name: Delete - - description: > + - description: | InfluxDB API endpoints use standard HTTP request and response headers. - **Note**: Not all operations support all headers. - ### Request headers - - | Header | Value type | - Description | - - |:------------------------ |:--------------------- - |:-------------------------------------------| - - | `Accept` | string | The content type that - the client can understand. | - - | `Authorization` | string | The authorization - scheme and credential. | - - | `Content-Encoding` | string | The compression - applied to the line protocol in the request payload. | - - | `Content-Length` | integer | The size of the - entity-body, in bytes, sent to the database. | - - | `Content-Type` | string | The format of the - data in the request body. | + | Header | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `Accept` | string | The content type that the client can understand. | + | `Authorization` | string | The authorization scheme and credential. | + | `Content-Encoding` | string | The compression applied to the line protocol in the request payload. | + | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | + | `Content-Type` | string | The format of the data in the request body. | name: Headers x-traitTag: true - - description: > - Manage and execute scripts as API endpoints in InfluxDB. + - description: | + Store, manage, and execute scripts in InfluxDB. + A script stores your custom Flux script and provides an invokable + endpoint that accepts runtime parameters. + In a script, you can specify custom runtime parameters + (`params`)--for example, `params.myparameter`. + Once you create a script, InfluxDB generates an + [`/api/v2/scripts/SCRIPT_ID/invoke` endpoint](#operation/PostScriptsIDInvoke) + for your organization. + You can run the script from API requests and tasks, defining parameter + values for each run. + When the script runs, InfluxDB replaces `params` references in the + script with the runtime parameter values you define. + Use the `/api/v2/scripts` endpoints to create and manage scripts. + See related guides to learn how to define parameters and execute scripts. - An API Invokable Script assigns your custom Flux script to a new + #### Related guides - InfluxDB API endpoint for your organization. - - Invokable scripts let you execute your script as an HTTP request to the - endpoint. - - - Invokable scripts accept parameters. - - Add parameter references in your script as `params.myparameter`. - - When you `invoke` your script, you send parameters as key-value pairs in - the `params` object. 
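As a concrete illustration of the invoke endpoint described above, the following sketch runs a script with runtime parameters, assuming the Python `requests` library; the host, token, and script ID are placeholders.

```python
import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder
TOKEN = "YOUR_INFLUX_TOKEN"                   # placeholder
SCRIPT_ID = "SCRIPT_ID"                       # placeholder

# InfluxDB replaces params references in the script (for example,
# params.mybucket) with the values passed here at run time.
resp = requests.post(
    f"{INFLUX_URL}/api/v2/scripts/{SCRIPT_ID}/invoke",
    headers={
        "Authorization": f"Token {TOKEN}",
        "Content-Type": "application/json",
    },
    json={"params": {"mybucket": "environment"}},
)
print(resp.status_code, resp.text)
```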
- - Then, InfluxDB executes your script with the key-value pairs as arguments, - and returns the result. - - - ### Related guides - - - - [Invoke custom - scripts](/influxdb/cloud/api-guide/api-invokable-scripts/). + - [Invoke custom scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) from API requests. + - [Create a task that references a script](/influxdb/cloud/process-data/manage-tasks/create-task/#create-a-task-that-references-a-script) name: Invokable Scripts - name: Labels - name: Legacy Authorizations @@ -18657,92 +17877,44 @@ tags: - name: Limits - name: NotificationEndpoints - name: NotificationRules - - description: > - Manage your - [organization](/influxdb/cloud/reference/glossary/#organization). - + - description: | + Manage your [organization](/influxdb/cloud/reference/glossary/#organization). An organization is a workspace for a group of users. Organizations can be - used to separate different environments, projects, teams or users within - InfluxDB. - Use the `/api/v2/orgs` endpoints to view and manage organizations. name: Organizations - name: Ping - description: | Retrieve data, analyze queries, and get query suggestions. name: Query - - description: > - See the [**API Quick Start**](/influxdb/cloud/api-guide/api_intro/) to get - up and running authenticating with tokens, writing to buckets, and - querying data. + - description: | + See the [**API Quick Start**](/influxdb/cloud/api-guide/api_intro/) to get up and running authenticating with tokens, writing to buckets, and querying data. - - [**InfluxDB API client - libraries**](/influxdb/cloud/api-guide/client-libraries/) are available - for popular languages and ready to import into your application. + [**InfluxDB API client libraries**](/influxdb/cloud/api-guide/client-libraries/) are available for popular languages and ready to import into your application. name: Quick start x-traitTag: true - name: Resources - - description: > - The InfluxDB API uses standard HTTP status codes for success and failure - responses. - - The response body may include additional details. For details about a - specific operation's response, see **Responses** and **Response Samples** - for that operation. - + - description: | + The InfluxDB API uses standard HTTP status codes for success and failure responses. + The response body may include additional details. For details about a specific operation's response, see **Responses** and **Response Samples** for that operation. API operations may return the following HTTP status codes: - |  Code  | Status | Description | - |:-----------:|:------------------------ |:--------------------- | - | `200` | Success | | - - | `204` | No content | For a `POST` request, `204` - indicates that InfluxDB accepted the request and request data is valid. - Asynchronous operations, such as `write`, might not have completed yet. | - - | `400` | Bad request | May indicate one of the - following:
  • Line protocol is malformed. The response body contains - the first malformed line in the data and indicates what was expected. For - partial writes, the number of points written and the number of points - rejected are also included. For more information, check the - `rejected_points` measurement in your `_monitoring` - bucket.
  • `Authorization` header is missing or malformed or the API - token doesn't have permission for the operation.
| - - | `401` | Unauthorized | May indicate one of the - following:
  • `Authorization: Token` header is missing or - malformed
  • API token value is missing from the header
  • API - token doesn't have permission. For more information about token types and - permissions, see [Manage API - tokens](/influxdb/cloud/security/tokens/)
| - - | `404` | Not found | Requested resource was not - found. `message` in the response body provides details about the requested - resource. | - - | `413` | Request entity too large | Request payload exceeds the - size limit. | - - | `422` | Unprocessable entity | Request data is invalid. `code` - and `message` in the response body provide details about the problem. | - - | `429` | Too many requests | API token is temporarily over - the request quota. The `Retry-After` header describes when to try the - request again. | - + | `204` | No content | For a `POST` request, `204` indicates that InfluxDB accepted the request and request data is valid. Asynchronous operations, such as `write`, might not have completed yet. | + | `400` | Bad request | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. For _writes_, the error may indicate one of the following problems:
  • Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. For more information, check the `rejected_points` measurement in your [_monitoring bucket](/influxdb/cloud/reference/internals/system-buckets/#_monitoring-system-bucket).
  • `Authorization` header is missing or malformed, or the API token doesn't have permission for the operation.<br>
| + | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/cloud/security/tokens/)
| + | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | + | `405` | Method not allowed | The API path doesn't support the HTTP method used in the request--for example, you send a `POST` request to an endpoint that only allows `GET`. | + | `413` | Request entity too large | Request payload exceeds the size limit. | + | `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. | + | `429` | Too many requests | API token is temporarily over the request quota. The `Retry-After` header describes when to try the request again. | | `500` | Internal server error | | - - | `503` | Service unavailable | Server is temporarily - unavailable to process the request. The `Retry-After` header describes - when to try the request again. | + | `503` | Service unavailable | Server is temporarily unavailable to process the request. The `Retry-After` header describes when to try the request again. | name: Response codes x-traitTag: true - name: Routes @@ -18753,113 +17925,66 @@ tags: - name: Signin - name: Signout - name: System information endpoints - - description: > - Process and analyze your data with tasks in the InfluxDB task engine. - - With tasks, you can schedule Flux scripts to query, analyze, modify, and - act on data. - - In InfluxDB Cloud, you can create tasks that run [invokable - scripts](#tag/Invokable-Scripts) - + - description: | + Process and analyze your data with [tasks](/influxdb/cloud/reference/glossary/#task) in the InfluxDB task engine. + With tasks, you can schedule Flux scripts to query, analyze, modify, and act on data. + In InfluxDB Cloud, you can create tasks that run [invokable scripts](#tag/Invokable-Scripts) with parameters. - - Use the `/api/v2/tasks` endpoints to create and manage tasks, retry task - runs, and retrieve run logs. - + Use the `/api/v2/tasks` endpoints to create and manage tasks, retry task runs, and retrieve run logs. #### Related guides - - - [Get started with tasks](/influxdb/cloud/process-data/get-started/). - - - [Common data processing - tasks](/influxdb/cloud/process-data/common-tasks/) - - - [Create a - script](/influxdb/cloud/api-guide/api-invokable-scripts/#create-an-invokable-script). + - [Get started with tasks](/influxdb/cloud/process-data/get-started/) + - [Common data processing tasks](/influxdb/cloud/process-data/common-tasks/) + - [Create a script](/influxdb/cloud/api-guide/api-invokable-scripts/#create-an-invokable-script) name: Tasks - name: Telegraf Plugins - name: Telegrafs - - description: > + - description: | Export and apply InfluxDB **templates**. - Manage **stacks** of templated InfluxDB resources. - InfluxDB templates are prepackaged configurations for - everything from dashboards and Telegraf to notifications and alerts. - Use InfluxDB templates to quickly configure a fresh instance of InfluxDB, - back up your dashboard configuration, or share your configuration with the - InfluxData community. - - Use the `/api/v2/templates` endpoints to export templates and apply - templates. - + Use the `/api/v2/templates` endpoints to export templates and apply templates. 
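The `/api/v2/tasks` endpoints mentioned above follow the same response-code conventions as the table earlier in this section. A minimal sketch that lists tasks, assuming the Python `requests` library and placeholder host and token:

```python
import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder
TOKEN = "YOUR_INFLUX_TOKEN"                   # placeholder

resp = requests.get(
    f"{INFLUX_URL}/api/v2/tasks",
    headers={"Authorization": f"Token {TOKEN}"},
)

if resp.ok:
    # The response body is assumed to contain a `tasks` array.
    for task in resp.json().get("tasks", []):
        print(task.get("id"), task.get("name"))
else:
    # 429 and 503 responses include a Retry-After header.
    print(resp.status_code, resp.headers.get("Retry-After"), resp.text)
```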
**InfluxDB stacks** are stateful InfluxDB templates that let you - - add, update, and remove installed template resources over time, avoid - duplicating - + add, update, and remove installed template resources over time, avoid duplicating resources when applying the same or similar templates more than once, and - apply changes to distributed instances of InfluxDB OSS or InfluxDB Cloud. - Use the `/api/v2/stacks` endpoints to manage installed template resources. - #### Related guides - - [InfluxDB stacks](/influxdb/cloud/influxdb-templates/stacks/) - - [InfluxDB templates](/influxdb/cloud/influxdb-templates/) name: Templates - name: Usage - - description: > + - description: | Retrieve specific users. - - InfluxDB Cloud lets you invite and collaborate with multiple users in your - organization. - - To invite and remove users from your organization, use the InfluxDB Cloud - user interface (UI); - + InfluxDB Cloud lets you invite and collaborate with multiple users in your organization. + To invite and remove users from your organization, use the InfluxDB Cloud user interface (UI); you can't use the InfluxDB API to manage users in InfluxDB Cloud. - Once a user is added to your organization, you can use the - `GET /api/v2/users` and `GET /api/v2/users/USER_ID` API endpoints to - view specific members. - #### User sessions with authorizations - Optionally, you can scope an authorization (and its API token) to a user. - If a user signs in with username and password, creating a _user session_, - - the session carries the permissions granted by all the user's - authorizations. - - To create a user session, use the [`POST - /api/v2/signin`](#operation/PostSignin) endpoint. - + the session carries the permissions granted by all the user's authorizations. + To create a user session, use the [`POST /api/v2/signin` endpoint](#operation/PostSignin). #### Related guides - - [Manage users](/influxdb/cloud/organizations/users/) name: Users - name: Variables diff --git a/api-docs/cloud/swaggerV1Compat.yml b/api-docs/cloud/swaggerV1Compat.yml index c96be9f27..02860b1ce 100644 --- a/api-docs/cloud/swaggerV1Compat.yml +++ b/api-docs/cloud/swaggerV1Compat.yml @@ -2,20 +2,13 @@ openapi: 3.0.0 info: title: InfluxDB Cloud v1 compatibility API documentation version: 0.1.0 - description: > - The InfluxDB 1.x compatibility /write and /query endpoints work with - InfluxDB 1.x client libraries and third-party integrations like Grafana and - others. - - - If you want to use the latest InfluxDB /api/v2 API instead, see the - [InfluxDB v2 API documentation](/influxdb/cloud/api/). + description: | + The InfluxDB 1.x compatibility /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. + If you want to use the latest InfluxDB /api/v2 API instead, see the [InfluxDB v2 API documentation](/influxdb/cloud/api/). This documentation is generated from the - - [InfluxDB OpenAPI - specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml). + [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml). servers: - url: / paths: @@ -41,9 +34,7 @@ paths: schema: type: string required: true - description: >- - Bucket to write to. If none exists, InfluxDB creates a bucket with a - default 3-day retention policy. + description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. 
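To make the user-session flow concrete, here is a sketch of signing in through the `POST /api/v2/signin` endpoint referenced above, assuming the Python `requests` library and that the endpoint accepts HTTP Basic credentials; the host and credentials are placeholders.

```python
import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder

session = requests.Session()
resp = session.post(
    f"{INFLUX_URL}/api/v2/signin",
    auth=("my-user", "my-password"),  # placeholder Basic credentials
)
resp.raise_for_status()

# The session cookie returned by signin authenticates subsequent requests
# made through this Session object; the session carries the permissions
# granted by all the user's authorizations.
me = session.get(f"{INFLUX_URL}/api/v2/users")
print(me.status_code)
```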
- in: query name: rp schema: @@ -56,36 +47,25 @@ paths: description: Write precision. - in: header name: Content-Encoding - description: >- - When present, its value indicates to the database that compression - is applied to the line protocol body. + description: When present, its value indicates to the database that compression is applied to the line protocol body. schema: type: string - description: >- - Specifies that the line protocol in the body is encoded with gzip - or not encoded with identity. + description: Specifies that the line protocol in the body is encoded with gzip or not encoded with identity. default: identity enum: - gzip - identity responses: '204': - description: >- - Write data is correctly formatted and accepted for writing to the - bucket. + description: Write data is correctly formatted and accepted for writing to the bucket. '400': - description: >- - Line protocol poorly formed and no points were written. Response - can be used to determine the first malformed line in the body - line-protocol. All data in body was rejected and not written. + description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. content: application/json: schema: $ref: '#/components/schemas/LineProtocolError' '401': - description: >- - Token does not have sufficient permissions to write to this - organization and bucket or the organization and bucket do not exist. + description: Token does not have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist. content: application/json: schema: @@ -97,35 +77,24 @@ paths: schema: $ref: '#/components/schemas/Error' '413': - description: >- - Write has been rejected because the payload is too large. Error - message returns max size supported. All data in body was rejected - and not written. + description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. content: application/json: schema: $ref: '#/components/schemas/LineProtocolLengthError' '429': - description: >- - Token is temporarily over quota. The Retry-After header describes - when to try the write again. + description: Token is temporarily over quota. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: type: integer format: int32 '503': - description: >- - Server is temporarily unavailable to accept writes. The Retry-After - header describes when to try the write again. + description: Server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: type: integer format: int32 @@ -155,10 +124,7 @@ paths: name: Accept schema: type: string - description: >- - Specifies how query results should be encoded in the response. - **Note:** With `application/csv`, query results include epoch - timestamps instead of RFC3339 timestamps. 
+ description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. default: application/json enum: - application/json @@ -167,15 +133,10 @@ paths: - application/x-msgpack - in: header name: Accept-Encoding - description: >- - The Accept-Encoding request HTTP header advertises which content - encoding, usually a compression algorithm, the client is able to - understand. + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. schema: type: string - description: >- - Specifies that the query response in the body should be encoded - with gzip or not encoded with identity. + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. default: identity enum: - gzip @@ -207,23 +168,16 @@ paths: description: Query results headers: Content-Encoding: - description: >- - The Content-Encoding entity header is used to compress the - media-type. When present, its value indicates which encodings - were applied to the entity-body + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body schema: type: string - description: >- - Specifies that the response in the body is encoded with gzip - or not encoded with identity. + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. default: identity enum: - gzip - identity Trace-Id: - description: >- - The Trace-Id header reports the request's trace ID, if one was - generated. + description: The Trace-Id header reports the request's trace ID, if one was generated. schema: type: string description: Specifies the request's trace ID. @@ -242,14 +196,10 @@ paths: type: string format: binary '429': - description: >- - Token is temporarily over quota. The Retry-After header describes - when to try the read again. + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: type: integer format: int32 @@ -330,11 +280,8 @@ components: items: {} InfluxQLCSVResponse: type: string - example: > - name,tags,time,test_field,test_tag - test_measurement,,1603740794286107366,1,tag_value - test_measurement,,1603740870053205649,2,tag_value - test_measurement,,1603741221085428881,3,tag_value + example: | + name,tags,time,test_field,test_tag test_measurement,,1603740794286107366,1,tag_value test_measurement,,1603740870053205649,2,tag_value test_measurement,,1603741221085428881,3,tag_value Error: properties: code: @@ -379,15 +326,11 @@ components: type: string op: readOnly: true - description: >- - Op describes the logical code operation during error. Useful for - debugging. + description: Op describes the logical code operation during error. Useful for debugging. type: string err: readOnly: true - description: >- - Err is a stack of errors that occurred during processing of the - request. Useful for debugging. + description: Err is a stack of errors that occurred during processing of the request. Useful for debugging. 
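Clients that ask for compressed results with `Accept-Encoding: gzip` can inspect the response headers described above. A sketch, assuming the Python `requests` library (which decompresses gzip bodies transparently); the host and token are placeholders.

```python
import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder
TOKEN = "YOUR_INFLUX_TOKEN"                   # placeholder

resp = requests.get(
    f"{INFLUX_URL}/query",
    headers={
        "Authorization": f"Token {TOKEN}",
        "Accept-Encoding": "gzip",  # ask for a compressed response body
    },
    params={"db": "mydb", "q": "SELECT * FROM cpu LIMIT 1"},
)

# Content-Encoding indicates which encodings were applied to the body;
# Trace-Id reports the request's trace ID, if one was generated.
print(resp.headers.get("Content-Encoding"))
print(resp.headers.get("Trace-Id"))
print(resp.text)  # requests has already decompressed the payload
```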
type: string line: readOnly: true @@ -425,30 +368,21 @@ components: type: apiKey name: Authorization in: header - description: > - Use the [Token - authentication](#section/Authentication/TokenAuthentication) - + description: | + Use the [Token authentication](#section/Authentication/TokenAuthentication) scheme to authenticate to the InfluxDB API. - In your API requests, send an `Authorization` header. - - For the header value, provide the word `Token` followed by a space and - an InfluxDB API token. - + For the header value, provide the word `Token` followed by a space and an InfluxDB API token. The word `Token` is case-sensitive. - ### Syntax - `Authorization: Token YOUR_INFLUX_TOKEN` - For examples and more information, see the following: - [`/authorizations`](#tag/Authorizations) endpoint. - [Authorize API requests](/influxdb/cloud/api-guide/api_intro/#authentication). @@ -456,54 +390,37 @@ components: BasicAuthentication: type: http scheme: basic - description: > - Use the HTTP [Basic - authentication](#section/Authentication/BasicAuthentication) - - scheme with clients that support the InfluxDB 1.x convention of username - and password (that don't support the `Authorization: Token` scheme): + description: | + Use the HTTP [Basic authentication](#section/Authentication/BasicAuthentication) + scheme with clients that support the InfluxDB 1.x convention of username and password (that don't support the `Authorization: Token` scheme): - - For examples and more information, see how to [authenticate with a - username and password](/influxdb/cloud/reference/api/influxdb-1x/). + For examples and more information, see how to [authenticate with a username and password](/influxdb/cloud/reference/api/influxdb-1x/). QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: > - Use the [Querystring - authentication](#section/Authentication/QuerystringAuthentication) - - scheme with InfluxDB 1.x API parameters to provide credentials through - the query string. + description: | + Use the [Querystring authentication](#section/Authentication/QuerystringAuthentication) + scheme with InfluxDB 1.x API parameters to provide credentials through the query string. - - For examples and more information, see how to [authenticate with a - username and password](/influxdb/cloud/reference/api/influxdb-1x/). + For examples and more information, see how to [authenticate with a username and password](/influxdb/cloud/reference/api/influxdb-1x/). security: - TokenAuthentication: [] - BasicAuthentication: [] - QuerystringAuthentication: [] tags: - name: Authentication - description: > + description: | The InfluxDB 1.x API requires authentication for all requests. - InfluxDB Cloud uses InfluxDB API tokens to authenticate requests. - For more information, see the following: - - [Token authentication](#section/Authentication/TokenAuthentication) - - [Basic authentication](#section/Authentication/BasicAuthentication) - - - [Querystring - authentication](#section/Authentication/QuerystringAuthentication) - + - [Querystring authentication](#section/Authentication/QuerystringAuthentication) x-traitTag: true diff --git a/api-docs/v2.4/ref.yml b/api-docs/v2.4/ref.yml index 64f6e1620..8ac4652de 100644 --- a/api-docs/v2.4/ref.yml +++ b/api-docs/v2.4/ref.yml @@ -1,9 +1,8 @@ components: parameters: After: - description: > - Resource ID to seek from. Results are not inclusive of this ID. Use - `after` instead of `offset`. + description: | + Resource ID to seek from. Results are not inclusive of this ID. 
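The three schemes above differ only in where the credentials travel. A sketch of each, assuming the Python `requests` library; the host and credentials are placeholders.

```python
import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder
TOKEN = "YOUR_INFLUX_TOKEN"                   # placeholder
QUERY = {"db": "mydb", "q": "SHOW DATABASES"}

# Token authentication: the word `Token` is case-sensitive.
r1 = requests.get(
    f"{INFLUX_URL}/query",
    headers={"Authorization": f"Token {TOKEN}"},
    params=QUERY,
)

# Basic authentication: the InfluxDB 1.x username-and-password convention.
r2 = requests.get(f"{INFLUX_URL}/query", auth=("my-user", "my-password"), params=QUERY)

# Querystring authentication: credentials in the `u` and `p` parameters.
r3 = requests.get(
    f"{INFLUX_URL}/query",
    params={"u": "my-user", "p": "my-password", **QUERY},
)

print(r1.status_code, r2.status_code, r3.status_code)
```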
Use `after` instead of `offset`. in: query name: after required: false @@ -75,9 +74,7 @@ components: readOnly: true type: string message: - description: >- - A human-readable message that may contain detail about the - error. + description: A human-readable message that may contain detail about the error. readOnly: true type: string description: | @@ -91,25 +88,19 @@ components: application/json: examples: orgProvidedNotFound: - summary: >- - The org or orgID passed doesn't own the token passed in the - header + summary: The org or orgID passed doesn't own the token passed in the header value: code: invalid message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if an incorrect value is passed for `org` or - `orgID`. + - Returns this error if an incorrect value is passed for `org` or `orgID`. GeneralServerError: content: application/json: @@ -145,21 +136,14 @@ components: message: organization not found schema: $ref: '#/components/schemas/Error' - description: > + description: | Not found. - A requested resource was not found. - - The response body contains the requested resource type and the name - value - + The response body contains the requested resource type and the name value (if you passed it)--for example: - - `"organization name \"my-org\" not found"` - - - `"organization not found"`: indicates you passed an ID that did not - match + - `"organization not found"`: indicates you passed an ID that did not match an organization. ServerError: content: @@ -223,6 +207,7 @@ components: readOnly: true type: string id: + description: The authorization ID. readOnly: true type: string links: @@ -239,15 +224,19 @@ components: readOnly: true type: object org: - description: The name of the organization that owns the token. + description: | + The organization name. + Specifies the [organization](/influxdb/v2.4/reference/glossary/#organization) that the token is scoped to. readOnly: true type: string orgID: - description: The ID of the organization. + description: | + The organization ID. + Specifies the [organization](/influxdb/v2.4/reference/glossary/#organization) that the authorization is scoped to. type: string permissions: description: | - A list of permissions for an authorization. + The list of permissions. An authorization must have at least one permission. items: $ref: '#/components/schemas/Permission' @@ -255,7 +244,11 @@ components: type: array token: description: | - The API token for authenticating InfluxDB API and CLI requests. + The API token. + [Tokens](/influxdb/v2.4/reference/glossary/#token) are + used to authenticate InfluxDB API requests and `influx` CLI commands. + If authenticated, the token is allowed the permissions of the _authorization_. + The token value is unique to the authorization. readOnly: true type: string updatedAt: @@ -263,11 +256,15 @@ components: readOnly: true type: string user: - description: The name of the user that the token is scoped to. + description: | + The user name. + Specifies the [user](/influxdb/v2.4/reference/glossary/#user) that owns the authorization. + If the authorization is _scoped_ to a user, the user; + otherwise, the creator of the authorization. readOnly: true type: string userID: - description: The ID of the user that the token is scoped to. + description: The user ID. 
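Seek-based paging with `after` looks like the following sketch, which assumes a list endpoint such as `/api/v2/buckets` (not shown in this excerpt) whose response contains a `buckets` array of objects with `id` fields; the host and token are placeholders.

```python
import requests

INFLUX_URL = "https://cloud2.influxdata.com"  # placeholder
TOKEN = "YOUR_INFLUX_TOKEN"                   # placeholder

after = None
while True:
    params = {"limit": 20}
    if after is not None:
        params["after"] = after  # results are not inclusive of this ID
    resp = requests.get(
        f"{INFLUX_URL}/api/v2/buckets",  # assumed list endpoint
        headers={"Authorization": f"Token {TOKEN}"},
        params=params,
    )
    resp.raise_for_status()
    page = resp.json().get("buckets", [])
    if not page:
        break
    for bucket in page:
        print(bucket["id"], bucket.get("name"))
    # Seek from the last ID on this page instead of using `offset`.
    after = page[-1]["id"]
```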
Specifies the [user](/influxdb/v2.4/reference/glossary/#user) that owns the authorization. If _scoped_, the user that the authorization is scoped to; otherwise, the creator of the authorization. readOnly: true type: string type: object @@ -305,9 +302,7 @@ components: type: string status: default: active - description: >- - Status of the token. If `inactive`, requests using the token will be - rejected. + description: Status of the token. If `inactive`, requests using the token will be rejected. enum: - active - inactive @@ -344,9 +339,7 @@ components: - '10' type: string bounds: - description: >- - The extents of the axis in the form [lower, upper]. Clients - determine whether bounds are inclusive or exclusive of their limits. + description: The extents of the axis in the form [lower, upper]. Clients determine whether bounds are inclusive or exclusive of their limits. items: type: string maxItems: 2 @@ -371,9 +364,7 @@ components: - linear type: string BadStatement: - description: >- - A placeholder for statements for which no correct statement nodes can be - created + description: A placeholder for statements for which no correct statement nodes can be created properties: text: description: Raw source text @@ -779,10 +770,7 @@ components: readOnly: true type: string latestCompleted: - description: >- - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)) - of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -919,10 +907,7 @@ components: ColorMapping: additionalProperties: type: string - description: >- - A color mapping is an object that maps time series data to a UI color - scheme to allow the UI to render graphs consistent colors across - reloads. + description: A color mapping is an object that maps time series data to a UI color scheme to allow the UI to render graphs consistent colors across reloads. example: configcat_deployments-autopromotionblocker: '#663cd0' measurement_birdmigration_europe: '#663cd0' @@ -930,9 +915,7 @@ components: series_id_2: '#edf529' type: object ConditionalExpression: - description: >- - Selects one of two expressions, `Alternate` or `Consequent`, depending - on a third boolean expression, `Test` + description: Selects one of two expressions, `Alternate` or `Consequent`, depending on a third boolean expression, `Test` properties: alternate: $ref: '#/components/schemas/Expression' @@ -1012,9 +995,7 @@ components: description: InfluxDB v1 database type: string default: - description: >- - Mapping represents the default retention policy for the database - specified. + description: Mapping represents the default retention policy for the database specified. type: boolean id: description: The ID of the DBRP mapping. @@ -1029,9 +1010,7 @@ components: description: InfluxDB v1 retention policy type: string virtual: - description: >- - Indicates an autogenerated, virtual mapping based on the bucket - name. Currently only available in OSS. + description: Indicates an autogenerated, virtual mapping based on the bucket name. Currently only available in OSS. type: boolean required: - id @@ -1050,9 +1029,7 @@ components: description: InfluxDB v1 database type: string default: - description: >- - Mapping represents the default retention policy for the database - specified. 
+ description: Mapping represents the default retention policy for the database specified. type: boolean org: description: The name of the organization that owns this mapping. @@ -1232,10 +1209,7 @@ components: $ref: '#/components/schemas/Links' type: object DateTimeLiteral: - description: >- - Represents an instant in time with nanosecond precision in [RFC3339Nano - date/time - format](/influxdb/v2.3/reference/glossary/#rfc3339nano-timestamp). + description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb/v2.3/reference/glossary/#rfc3339nano-timestamp). properties: type: $ref: '#/components/schemas/NodeType' @@ -1259,9 +1233,7 @@ components: description: If only zero values reported since time, trigger an alert type: boolean staleTime: - description: >- - String duration for time that a series is considered stale and - should not trigger deadman. + description: String duration for time that a series is considered stale and should not trigger deadman. type: string statusMessageTemplate: description: The template used to generate and write a status message. @@ -1287,9 +1259,7 @@ components: - type type: object DecimalPlaces: - description: >- - Indicates whether decimal places should be enforced, and how many digits - it should show. + description: Indicates whether decimal places should be enforced, and how many digits it should show. properties: digits: description: The number of digits after decimal to display @@ -1303,24 +1273,19 @@ components: description: The delete predicate request. properties: predicate: - description: > - An expression in [delete predicate - syntax](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/delete-predicate/). + description: | + An expression in [delete predicate syntax](/influxdb/v2.3/reference/syntax/delete-predicate/). example: tag1="value1" and (tag2="value2" and tag3!="value3") type: string start: - description: > - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). - + description: | + A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). The earliest time to delete from. format: date-time type: string stop: - description: > - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). - + description: | + A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). The latest time to delete from. format: date-time type: string @@ -1329,39 +1294,24 @@ components: - stop type: object Dialect: - description: > + description: | Options for tabular data output. - - Default output is [annotated - CSV](/influxdb/v2.3/reference/syntax/annotated-csv/#csv-response-format) - with headers. - + Default output is [annotated CSV](/influxdb/v2.3/reference/syntax/annotated-csv/#csv-response-format) with headers. For more information about tabular data **dialect**, - - see [W3 metadata vocabulary for tabular - data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). + see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). properties: annotations: - description: > + description: | Annotation rows to include in the results. - - An _annotation_ is metadata associated with an object (column) in - the data model. - + An _annotation_ is metadata associated with an object (column) in the data model. 
#### Related guides - - - See [Annotated CSV - annotations](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/annotated-csv/#annotations) - for examples and more information. - + - See [Annotated CSV annotations](/influxdb/v2.3/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, - - see [W3 metadata vocabulary for tabular - data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). + see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). items: enum: - group @@ -1372,32 +1322,22 @@ components: uniqueItems: true commentPrefix: default: '#' - description: >- - The character prefixed to comment strings. Default is a number sign - (`#`). + description: The character prefixed to comment strings. Default is a number sign (`#`). maxLength: 1 minLength: 0 type: string dateTimeFormat: default: RFC3339 - description: > + description: | The format for timestamps in results. - - Default is [`RFC3339` date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp). - + Default is [`RFC3339` date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. - #### Example formatted date/time values - | Format | Value | - |:------------|:----------------------------| - | `RFC3339` | `"2006-01-02T15:04:05Z07:00"` | - | `RFC3339Nano` | `"2006-01-02T15:04:05.999999999Z07:00"` | enum: - RFC3339 @@ -1436,9 +1376,7 @@ components: $ref: '#/components/schemas/Expression' type: object Duration: - description: >- - A pair consisting of length of time and the unit of time measured. It is - the atomic unit from which all duration literals are composed. + description: A pair consisting of length of time and the unit of time measured. It is the atomic unit from which all duration literals are composed. properties: magnitude: type: integer @@ -1448,9 +1386,7 @@ components: type: string type: object DurationLiteral: - description: >- - Represents the elapsed time between two instants as an int64 nanosecond - count with syntax of golang's time.Duration + description: Represents the elapsed time between two instants as an int64 nanosecond count with syntax of golang's time.Duration properties: type: $ref: '#/components/schemas/NodeType' @@ -1482,9 +1418,7 @@ components: readOnly: true type: string err: - description: >- - Stack of errors that occurred during processing of the request. - Useful for debugging. + description: Stack of errors that occurred during processing of the request. Useful for debugging. readOnly: true type: string message: @@ -1492,9 +1426,7 @@ components: readOnly: true type: string op: - description: >- - Describes the logical code operation when the error occurred. Useful - for debugging. + description: Describes the logical code operation when the error occurred. Useful for debugging. 
readOnly: true type: string required: @@ -1543,9 +1475,7 @@ components: - $ref: '#/components/schemas/UnsignedIntegerLiteral' - $ref: '#/components/schemas/Identifier' ExpressionStatement: - description: >- - May consist of an expression that doesn't return a value and is executed - solely for its side-effects + description: May consist of an expression that doesn't return a value and is executed solely for its side-effects properties: expression: $ref: '#/components/schemas/Expression' @@ -1555,9 +1485,7 @@ components: Field: properties: alias: - description: >- - Alias overrides the field name in the returned response. Applies - only if type is `func` + description: Alias overrides the field name in the returned response. Applies only if type is `func` type: string args: description: Args are the arguments to the function @@ -1565,9 +1493,7 @@ components: $ref: '#/components/schemas/Field' type: array type: - description: >- - `type` describes the field type. `func` is a function. `field` is a - field reference. + description: '`type` describes the field type. `func` is a function. `field` is a field reference.' enum: - func - field @@ -1577,9 +1503,7 @@ components: - wildcard type: string value: - description: >- - value is the value of the field. Meaning of the value is implied by - the `type` key + description: value is the value of the field. Meaning of the value is implied by the `type` key type: string type: object File: @@ -1607,9 +1531,7 @@ components: additionalProperties: true type: object FloatLiteral: - description: >- - Represents floating point numbers according to the double - representations defined by the IEEE-754-1985 + description: Represents floating point numbers according to the double representations defined by the IEEE-754-1985 properties: type: $ref: '#/components/schemas/NodeType' @@ -1854,9 +1776,7 @@ components: type: array detectCoordinateFields: default: true - description: >- - If true, search results get automatically regroupped so that lon,lat - and value are treated as columns + description: If true, search results get automatically regroupped so that lon,lat and value are treated as columns type: boolean latLonColumns: $ref: '#/components/schemas/LatLonColumns' @@ -2204,11 +2124,8 @@ components: type: object InfluxqlCsvResponse: description: CSV Response to InfluxQL Query - example: > - name,tags,time,test_field,test_tag - test_measurement,,1603740794286107366,1,tag_value - test_measurement,,1603740870053205649,2,tag_value - test_measurement,,1603741221085428881,3,tag_value + example: | + name,tags,time,test_field,test_tag test_measurement,,1603740794286107366,1,tag_value test_measurement,,1603740870053205649,2,tag_value test_measurement,,1603741221085428881,3,tag_value type: string InfluxqlJsonResponse: description: JSON Response to InfluxQL Query @@ -2274,11 +2191,9 @@ components: properties: additionalProperties: type: string - description: > + description: | Key-value pairs associated with this label. - - To remove a property, send an update with an empty value (`""`) for - the key. + To remove a property, send an update with an empty value (`""`) for the key. example: color: ffb3b3 description: this is a description @@ -2293,12 +2208,10 @@ components: properties: additionalProperties: type: string - description: > + description: | Key-value pairs associated with this label. - - To remove a property, send an update with an empty value (`""`) for - the key. + To remove a property, send an update with an empty value (`""`) for the key. 
example: color: ffb3b3 description: this is a description @@ -2328,12 +2241,10 @@ components: type: string properties: additionalProperties: - description: > + description: | Key-value pairs associated with this label. - - To remove a property, send an update with an empty value (`""`) - for the key. + To remove a property, send an update with an empty value (`""`) for the key. type: string example: color: ffb3b3 @@ -2393,10 +2304,8 @@ components: description: The ID of the organization that the authorization is scoped to. type: string permissions: - description: > - A list of permissions that provide `read` and `write` access to - organization resources. - + description: | + A list of permissions that provide `read` and `write` access to organization resources. An authorization must contain at least one permission. items: $ref: '#/components/schemas/Permission' @@ -2543,9 +2452,7 @@ components: readOnly: true type: string err: - description: >- - Stack of errors that occurred during processing of the request. - Useful for debugging. + description: Stack of errors that occurred during processing of the request. Useful for debugging. readOnly: true type: string line: @@ -2558,9 +2465,7 @@ components: readOnly: true type: string op: - description: >- - Describes the logical code operation when the error occurred. Useful - for debugging. + description: Describes the logical code operation when the error occurred. Useful for debugging. readOnly: true type: string required: @@ -2610,19 +2515,14 @@ components: readOnly: true type: string time: - description: >- - The time ([RFC3339Nano date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339nano-timestamp)) - that the event occurred. + description: The time ([RFC3339Nano date/time format](/influxdb/v2.3/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true type: string type: object LogicalExpression: - description: >- - Represents the rule conditions that collectively evaluate to either true - or false + description: Represents the rule conditions that collectively evaluate to either true or false properties: left: $ref: '#/components/schemas/Expression' @@ -2954,22 +2854,15 @@ components: readOnly: true type: string latestCompleted: - description: >- - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)) - of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string limit: - description: >- - Don't notify me more than times every seconds. - If set, limitEvery cannot be empty. + description: Don't notify me more than times every seconds. If set, limitEvery cannot be empty. type: integer limitEvery: - description: >- - Don't notify me more than times every seconds. - If set, limit cannot be empty. + description: Don't notify me more than times every seconds. If set, limit cannot be empty. type: integer links: example: @@ -3096,18 +2989,15 @@ components: type: string retentionPeriodHrs: deprecated: true - description: > - Retention period *in nanoseconds* for the new bucket. This key's - name has been misleading since OSS 2.0 GA, please transition to use - `retentionPeriodSeconds` + description: | + Retention period *in nanoseconds* for the new bucket. 
This key's name has been misleading since OSS 2.0 GA, please transition to use `retentionPeriodSeconds` type: integer retentionPeriodSeconds: format: int64 type: integer token: - description: > - Authentication token to set on the initial user. If not specified, - the server will generate a token. + description: | + Authentication token to set on the initial user. If not specified, the server will generate a token. type: string username: type: string @@ -3309,31 +3199,21 @@ components: minimum: 0 type: integer shardGroupDurationSeconds: - description: > - The [shard group - duration](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#shard). - + description: | + The [shard group duration](/influxdb/v2.3/reference/glossary/#shard). The number of seconds that each shard group covers. - #### InfluxDB Cloud - - Doesn't use `shardGroupDurationsSeconds`. - #### InfluxDB OSS - - - Default value depends on the [bucket retention - period](https://docs.influxdata.com/influxdb/v2.3/reference/internals/shards/#shard-group-duration). - + - Default value depends on the [bucket retention period](/influxdb/v2.3/reference/internals/shards/#shard-group-duration). #### Related guides - - - InfluxDB [shards and shard - groups](https://docs.influxdata.com/influxdb/v2.3/reference/internals/shards/) + - InfluxDB [shards and shard groups](/influxdb/v2.3/reference/internals/shards/) format: int64 type: integer type: @@ -3360,11 +3240,9 @@ components: $ref: '#/components/schemas/Resource' properties: id: - description: > + description: | The ID of a specific resource. - - In a `permission`, applies the permission to only the resource - with this ID. + In a `permission`, applies the permission to only the resource with this ID. type: string name: description: | @@ -3376,18 +3254,14 @@ components: Optional: The name of the organization with `orgID`. type: string orgID: - description: > + description: | The ID of the organization that owns the resource. - - In a `permission`, applies the permission to all resources of - `type` owned by this organization. + In a `permission`, applies the permission to all resources of `type` owned by this organization. type: string type: - description: > + description: | The type of resource. - - In a `permission`, applies the permission to all resources of - this type. + In a `permission`, applies the permission to all resources of this type. enum: - authorizations - buckets @@ -3430,9 +3304,7 @@ components: $ref: '#/components/schemas/NodeType' type: object PipeLiteral: - description: >- - Represents a specialized literal value, indicating the left hand value - of a pipe expression + description: Represents a specialized literal value, indicating the left hand value of a pipe expression properties: type: $ref: '#/components/schemas/NodeType' @@ -3456,41 +3328,27 @@ components: $ref: '#/components/schemas/RetentionRules' rp: default: '0' - description: > - Retention policy is an InfluxDB 1.x concept that represents the - duration - - of time that each data point in the retention policy persists. Use - `rp` - + description: | + Retention policy is an InfluxDB 1.x concept that represents the duration + of time that each data point in the retention policy persists. Use `rp` for compatibility with InfluxDB 1.x. - The InfluxDB 2.x and Cloud equivalent is - - [retention - period](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#retention-period). + [retention period](/influxdb/v2.3/reference/glossary/#retention-period). 
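For illustration only (not from the spec), a minimal sketch of creating a bucket that sets the 1.x-compatible `rp` value alongside a retention rule; `ORG_ID` and `INFLUX_API_TOKEN` are placeholders:

```sh
# Create a bucket with a 30-day retention rule and a 1.x-compatible
# retention policy name. ORG_ID and INFLUX_API_TOKEN are placeholders.
curl --request POST "http://localhost:8086/api/v2/buckets" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "orgID": "ORG_ID",
    "name": "example-bucket",
    "rp": "autogen",
    "retentionRules": [{"type": "expire", "everySeconds": 2592000}]
  }'
```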
type: string schemaType: $ref: '#/components/schemas/SchemaType' default: implicit - description: > + description: | Schema Type. - - Use `explicit` to enforce column names, tags, fields, and data types - for - + Use `explicit` to enforce column names, tags, fields, and data types for your data. - #### InfluxDB Cloud - - Default is `implicit`. - #### InfluxDB OSS - - Doesn't support `schemaType`. required: - orgID @@ -3546,22 +3404,18 @@ components: type: string params: additionalProperties: true - description: > + description: | Key-value pairs passed as parameters during query execution. - - To use parameters in your query, pass a _`query`_ with `params` - references (in dot notation)--for example: - + To use parameters in your query, pass a _`query`_ with `params` references (in dot notation)--for example: ```json - query: "from(bucket: params.mybucket) |> range(start: params.rangeStart) |> limit(n:1)" + query: "from(bucket: params.mybucket)\ + |> range(start: params.rangeStart) |> limit(n:1)" ``` - and pass _`params`_ with the key-value pairs--for example: - ```json params: { "mybucket": "environment", @@ -3569,14 +3423,10 @@ components: } ``` - - During query execution, InfluxDB passes _`params`_ to your script - and substitutes the values. - + During query execution, InfluxDB passes _`params`_ to your script and substitutes the values. #### Limitations - - If you use _`params`_, you can't use _`extern`_. type: object query: @@ -3645,9 +3495,7 @@ components: type: string type: object RegexpLiteral: - description: >- - Expressions begin and end with `/` and are regular expressions with - syntax accepted by RE2 + description: Expressions begin and end with `/` and are regular expressions with syntax accepted by RE2 properties: type: $ref: '#/components/schemas/NodeType' @@ -3853,11 +3701,9 @@ components: Resource: properties: id: - description: > + description: | The ID of a specific resource. - - In a `permission`, applies the permission to only the resource with - this ID. + In a `permission`, applies the permission to only the resource with this ID. type: string name: description: | @@ -3869,18 +3715,14 @@ components: Optional: The name of the organization with `orgID`. type: string orgID: - description: > + description: | The ID of the organization that owns the resource. - - In a `permission`, applies the permission to all resources of `type` - owned by this organization. + In a `permission`, applies the permission to all resources of `type` owned by this organization. type: string type: - description: > + description: | The type of resource. - - In a `permission`, applies the permission to all resources of this - type. + In a `permission`, applies the permission to all resources of this type. enum: - authorizations - buckets @@ -4001,37 +3843,27 @@ components: properties: everySeconds: default: 2592000 - description: > - The duration in seconds for how long data will be kept in the - database. - + description: | + The duration in seconds for how long data will be kept in the database. The default duration is 2592000 (30 days). - 0 represents infinite retention. example: 86400 format: int64 minimum: 0 type: integer shardGroupDurationSeconds: - description: > + description: | The shard group duration. - The duration or interval (in seconds) that each shard group covers. - #### InfluxDB Cloud - - Does not use `shardGroupDurationsSeconds`. 
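A hedged sketch (not part of the spec) of the `params` usage described above, assuming a bucket named `environment`; `ORG_ID` and `INFLUX_API_TOKEN` are placeholders:

```sh
# Pass `params` with a parameterized Flux query.
# ORG_ID and INFLUX_API_TOKEN are placeholders.
curl --request POST "http://localhost:8086/api/v2/query?orgID=ORG_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --header "Accept: application/csv" \
  --data '{
    "query": "from(bucket: params.mybucket) |> range(start: params.rangeStart) |> limit(n: 1)",
    "params": {"mybucket": "environment", "rangeStart": "-30d"}
  }'
```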
- #### InfluxDB OSS - - Default value depends on the - - [bucket retention - period](https://docs.influxdata.com/influxdb/v2.3/v2.3/reference/internals/shards/#shard-group-duration). + [bucket retention period](/influxdb/v2.3/reference/internals/shards/#shard-group-duration). format: int64 type: integer type: @@ -4156,10 +3988,7 @@ components: Run: properties: finishedAt: - description: >- - The time ([RFC3339Nano date/time - format](https://go.dev/src/time/format.go)) the run finished - executing. + description: The time ([RFC3339Nano date/time format](https://go.dev/src/time/format.go)) the run finished executing. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true type: string @@ -4195,26 +4024,17 @@ components: readOnly: true type: array requestedAt: - description: >- - The time ([RFC3339Nano date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339nano-timestamp)) - the run was manually requested. + description: The time ([RFC3339Nano date/time format](/influxdb/v2.3/reference/glossary/#rfc3339nano-timestamp)) the run was manually requested. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true type: string scheduledFor: - description: >- - The time [RFC3339 date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp) - used for the run's `now` option. + description: The time [RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp) used for the run's `now` option. format: date-time type: string startedAt: - description: >- - The time ([RFC3339Nano date/time - format](https://go.dev/src/time/format.go)) the run started - executing. + description: The time ([RFC3339Nano date/time format](https://go.dev/src/time/format.go)) the run started executing. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true @@ -4234,12 +4054,9 @@ components: RunManually: properties: scheduledFor: - description: > - The time [RFC3339 date/time - format](https://docs.influxdata.com/influxdb/v2.3/reference/glossary/#rfc3339-timestamp) - + description: | + The time [RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp) used for the run's `now` option. - Default is the server _now_ time. format: date-time nullable: true @@ -4629,9 +4446,7 @@ components: description: Specifies the API token string. Specify either `URL` or `Token`. type: string url: - description: >- - Specifies the URL of the Slack endpoint. Specify either `URL` or - `Token`. + description: Specifies the URL of the Slack endpoint. Specify either `URL` or `Token`.
type: string type: object type: object @@ -4862,9 +4677,7 @@ components: decimalPlaces: $ref: '#/components/schemas/DecimalPlaces' fieldOptions: - description: >- - fieldOptions represent the fields retrieved by the query with - customization options + description: fieldOptions represent the fields retrieved by the query with customization options items: $ref: '#/components/schemas/RenamableField' type: array @@ -4884,21 +4697,15 @@ components: tableOptions: properties: fixFirstColumn: - description: >- - fixFirstColumn indicates whether the first column of the table - should be locked + description: fixFirstColumn indicates whether the first column of the table should be locked type: boolean sortBy: $ref: '#/components/schemas/RenamableField' verticalTimeAxis: - description: >- - verticalTimeAxis describes the orientation of the table by - indicating whether the time axis will be displayed vertically + description: verticalTimeAxis describes the orientation of the table by indicating whether the time axis will be displayed vertically type: boolean wrapping: - description: >- - Wrapping describes the text wrapping style to be used in table - views + description: Wrapping describes the text wrapping style to be used in table views enum: - truncate - wrap @@ -4906,9 +4713,7 @@ components: type: string type: object timeFormat: - description: >- - timeFormat describes the display format for time values according to - moment.js date formatting + description: timeFormat describes the display format for time values according to moment.js date formatting type: string type: enum: @@ -4943,33 +4748,31 @@ components: Task: properties: authorizationID: - description: >- - The ID of the authorization used when the task communicates with the - query engine. + description: | + An authorization ID. + Specifies the authorization used when the task communicates with the query engine. + + To find an authorization ID, use the + [`GET /api/v2/authorizations` endpoint](#operation/GetAuthorizations) to + list authorizations. type: string createdAt: format: date-time readOnly: true type: string cron: - description: >- - [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that - defines the schedule on which the task runs. InfluxDB bases cron - runs on the system time. + description: A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that defines the schedule on which the task runs. InfluxDB uses the system time when evaluating Cron expressions. type: string description: - description: The description of the task. + description: A description of the task. type: string every: - description: >- - An interval ([duration - literal](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals))) - at which the task runs. `every` also determines when the task first - runs, depending on the specified time. + description: The interval ([duration literal](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)) at which the task runs. `every` also determines when the task first runs, depending on the specified time. format: duration type: string flux: - description: The Flux script that the task runs. + description: The Flux script that the task executes. + format: flux type: string id: readOnly: true @@ -4987,10 +4790,7 @@ components: readOnly: true type: string latestCompleted: - description: >- - A timestamp ([RFC3339 date/time - format](https://docs.influxdata.com/flux/v0.x/data-types/basic/time/#time-syntax)) - of the latest scheduled and completed run. 
+ description: A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -5021,21 +4821,27 @@ components: description: The name of the task. type: string offset: - description: >- - A - [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals) - to delay execution of the task after the scheduled time has elapsed. - `0` removes the offset. + description: A [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals) to delay execution of the task after the scheduled time has elapsed. `0` removes the offset. format: duration type: string org: - description: The name of the organization that owns the task. + description: | + An [organization](/influxdb/v2.3/reference/glossary/#organization) name. + Specifies the organization that owns the task. type: string orgID: - description: The ID of the organization that owns the task. + description: | + An [organization](/influxdb/v2.3/reference/glossary/#organization) ID. + Specifies the organization that owns the task. type: string ownerID: - description: The ID of the user who owns the Task. + description: | + A [user](/influxdb/v2.3/reference/glossary/#user) ID. + Specifies the owner of the task. + + To find a user ID, you can use the + [`GET /api/v2/users` endpoint](#operation/GetUsers) to + list users. type: string status: $ref: '#/components/schemas/TaskStatusType' @@ -5222,14 +5028,10 @@ components: - $ref: '#/components/schemas/NotificationEndpointBase' - properties: channel: - description: >- - The ID of the telegram channel; a chat_id in - https://core.telegram.org/bots/api#sendmessage . + description: The ID of the telegram channel; a chat_id in https://core.telegram.org/bots/api#sendmessage . type: string token: - description: >- - Specifies the Telegram bot token. See - https://core.telegram.org/bots#creating-a-new-bot . + description: Specifies the Telegram bot token. See https://core.telegram.org/bots#creating-a-new-bot . type: string required: - token @@ -5243,27 +5045,20 @@ components: TelegramNotificationRuleBase: properties: disableWebPagePreview: - description: >- - Disables preview of web links in the sent messages when "true". - Defaults to "false". + description: Disables preview of web links in the sent messages when "true". Defaults to "false". type: boolean messageTemplate: description: The message template as a flux interpolated string. type: string parseMode: - description: >- - Parse mode of the message text per - https://core.telegram.org/bots/api#formatting-options. Defaults to - "MarkdownV2". + description: Parse mode of the message text per https://core.telegram.org/bots/api#formatting-options. Defaults to "MarkdownV2". enum: - MarkdownV2 - HTML - Markdown type: string type: - description: >- - The discriminator between other types of notification rules is - "telegram". + description: The discriminator between other types of notification rules is "telegram". enum: - telegram type: string @@ -5284,72 +5079,58 @@ components: kind: $ref: '#/components/schemas/TemplateKind' metadata: - description: > - Metadata properties used for the resource when the template is - applied. + description: | + Metadata properties used for the resource when the template is applied. properties: name: type: string type: object spec: - description: > - Configuration properties used for the resource when the template - is applied. 
- + description: | + Configuration properties used for the resource when the template is applied. Key-value pairs map to the specification for the resource. - - The following code samples show `spec` configurations for template - resources: - + The following code samples show `spec` configurations for template resources: - A bucket: - ```json - { "spec": { - "name": "iot_center", - "retentionRules": [{ - "everySeconds": 2.592e+06, - "type": "expire" - }] - } + ```json + { "spec": { + "name": "iot_center", + "retentionRules": [{ + "everySeconds": 2.592e+06, + "type": "expire" + }] } - ``` + } + ``` - A variable: - ```json - { "spec": { - "language": "flux", - "name": "Node_Service", - "query": "import \"influxdata/influxdb/v1\"\r\nv1.tagValues(bucket: \"iot_center\", - tag: \"service\")", - "type": "query" - } + ```json + { "spec": { + "language": "flux", + "name": "Node_Service", + "query": "import \"influxdata/influxdb/v1\"\r\nv1.tagValues(bucket: \"iot_center\", + tag: \"service\")", + "type": "query" } - ``` + } + ``` type: object type: object type: array TemplateApply: properties: actions: - description: > + description: | A list of `action` objects. + Actions let you customize how InfluxDB applies templates in the request. - Actions let you customize how InfluxDB applies templates in the - request. + You can use the following actions to prevent creating or updating resources: - - You can use the following actions to prevent creating or updating - resources: - - - - A `skipKind` action skips template resources of a specified - `kind`. - - - A `skipResource` action skips template resources with a specified - `metadata.name` + - A `skipKind` action skips template resources of a specified `kind`. + - A `skipResource` action skips template resources with a specified `metadata.name` and `kind`. items: oneOf: @@ -5384,14 +5165,11 @@ components: type: object type: array dryRun: - description: > + description: | Only applies a dry run of the templates passed in the request. - - Validates the template and generates a resource diff and summary. - - - Doesn't install templates or make changes to the InfluxDB - instance. + - Doesn't install templates or make changes to the InfluxDB instance. type: boolean envRefs: additionalProperties: @@ -5400,30 +5178,17 @@ components: - type: integer - type: number - type: boolean - description: > - An object with key-value pairs that map to **environment - references** in templates. - - - Environment references in templates are `envRef` objects with an - `envRef.key` + description: | + An object with key-value pairs that map to **environment references** in templates. + Environment references in templates are `envRef` objects with an `envRef.key` property. - - To substitute a custom environment reference value when applying - templates, - + To substitute a custom environment reference value when applying templates, pass `envRefs` with the `envRef.key` and the value. - - When you apply a template, InfluxDB replaces `envRef` objects in the - template - + When you apply a template, InfluxDB replaces `envRef` objects in the template with the values that you provide in the `envRefs` parameter. - - For more examples, see how to [define environment - references](https://docs.influxdata.com/influxdb/v2.3/influxdb-templates/use/#define-environment-references). - + For more examples, see how to [define environment references](/influxdb/v2.3/influxdb-templates/use/#define-environment-references). 
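As a hedged sketch (illustrative, not from the spec) of the `envRefs` parameter described above; `ORG_ID`, `INFLUX_API_TOKEN`, the template URL, and the `bucket-name-1` key are placeholders:

```sh
# Substitute a custom value for an environment reference when applying
# a template. All identifiers below are placeholders.
curl --request POST "http://localhost:8086/api/v2/templates/apply" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "orgID": "ORG_ID",
    "remotes": [{"url": "https://example.com/template.yml"}],
    "envRefs": {"bucket-name-1": "iot_center"}
  }'
```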
The following template fields may use environment references: @@ -5431,25 +5196,17 @@ components: - `spec.endpointName` - `spec.associations.name` - For more information about including environment references in - template fields, see how to - - [include user-definable resource - names](https://docs.influxdata.com/influxdb/v2.3/influxdb-templates/create/#include-user-definable-resource-names). + For more information about including environment references in template fields, see how to + [include user-definable resource names](/influxdb/v2.3/influxdb-templates/create/#include-user-definable-resource-names). type: object orgID: - description: > + description: | Organization ID. - InfluxDB applies templates to this organization. - The organization owns all resources created by the template. - To find your organization, see how to - - [view - organizations](https://docs.influxdata.com/influxdb/v2.3/organizations/view-orgs/). + [view organizations](/influxdb/v2.3/organizations/view-orgs/). type: string remotes: description: | @@ -5470,99 +5227,72 @@ components: secrets: additionalProperties: type: string - description: > + description: | An object with key-value pairs that map to **secrets** in queries. - Queries may reference secrets stored in InfluxDB--for example, - - the following Flux script retrieves `POSTGRES_USERNAME` and - `POSTGRES_PASSWORD` - + the following Flux script retrieves `POSTGRES_USERNAME` and `POSTGRES_PASSWORD` secrets and then uses them to connect to a PostgreSQL database: - ```js - import "sql" - import "influxdata/influxdb/secrets" + ```js + import "sql" + import "influxdata/influxdb/secrets" - username = secrets.get(key: "POSTGRES_USERNAME") - password = secrets.get(key: "POSTGRES_PASSWORD") + username = secrets.get(key: "POSTGRES_USERNAME") + password = secrets.get(key: "POSTGRES_PASSWORD") - sql.from( - driverName: "postgres", - dataSourceName: "postgresql://${username}:${password}@localhost:5432", - query: "SELECT * FROM example_table", - ) - ``` + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://${username}:${password}@localhost:5432", + query: "SELECT * FROM example_table", + ) + ``` To define secret values in your `/api/v2/templates/apply` request, - pass the `secrets` parameter with key-value pairs--for example: - ```json - { - ... - "secrets": { - "POSTGRES_USERNAME": "pguser", - "POSTGRES_PASSWORD": "foo" - } - ... + ```json + { + ... + "secrets": { + "POSTGRES_USERNAME": "pguser", + "POSTGRES_PASSWORD": "foo" } - ``` - - InfluxDB stores the key-value pairs as secrets that you can access - with `secrets.get()`. + ... + } + ``` + InfluxDB stores the key-value pairs as secrets that you can access with `secrets.get()`. Once stored, you can't view secret values in InfluxDB. - #### Related guides - - - [How to pass secrets when installing a - template](https://docs.influxdata.com/influxdb/v2.3/influxdb-templates/use/#pass-secrets-when-installing-a-template) + - [How to pass secrets when installing a template](/influxdb/v2.3/influxdb-templates/use/#pass-secrets-when-installing-a-template) type: object stackID: - description: > + description: | ID of the stack to update. - - To apply templates to an existing stack in the organization, use the - `stackID` parameter. - + To apply templates to an existing stack in the organization, use the `stackID` parameter. If you apply templates without providing a stack ID, - InfluxDB initializes a new stack with all new resources. 
- - To find a stack ID, use the InfluxDB [`/api/v2/stacks` API - endpoint](#operation/ListStacks) to list stacks. - + To find a stack ID, use the InfluxDB [`/api/v2/stacks` API endpoint](#operation/ListStacks) to list stacks. #### Related guides - - - [Stacks](https://docs.influxdata.com/influxdb/v2.3/influxdb-templates/stacks/) - - - [View - stacks](https://docs.influxdata.com/influxdb/v2.3/influxdb-templates/stacks/view/) + - [Stacks](/influxdb/v2.3/influxdb-templates/stacks/) + - [View stacks](/influxdb/v2.3/influxdb-templates/stacks/view/) type: string template: - description: > + description: | A template object to apply. - A template object has a `contents` property - with an array of InfluxDB resource configurations. - Pass `template` to apply only one template object. - If you use `template`, you can't use the `templates` parameter. - - If you want to apply multiple template objects, use `templates` - instead. + If you want to apply multiple template objects, use `templates` instead. properties: contentType: type: string @@ -5611,9 +5341,7 @@ components: items: properties: defaultValue: - description: >- - Default value that will be provided for the reference when no - value is provided + description: Default value that will be provided for the reference when no value is provided nullable: true oneOf: - type: string - type: integer - type: number - type: boolean envRefKey: - description: >- - Key identified as environment reference and is the key identified - in the template + description: The key in the template that is identified as an environment reference type: string resourceField: description: Field the environment reference corresponds to type: string @@ -5669,10 +5395,7 @@ components: kind: $ref: '#/components/schemas/TemplateKind' name: - description: >- - if defined with id, name is used for resource exported by id. - if defined independently, resources strictly matching name are - exported + description: If defined with `id`, `name` is used for the resource exported by `id`. If defined independently, resources strictly matching `name` are exported. type: string required: - id @@ -6457,15 +6180,20 @@ components: User: properties: id: + description: The user ID. readOnly: true type: string name: + description: The user name. type: string oauthID: + description: The OAuth ID. type: string status: default: active - description: If inactive the user is inactive. + description: | + If `inactive`, the user is inactive. + Default is `active`. enum: - active - inactive @@ -6476,7 +6204,7 @@ components: properties: id: description: | - The ID of the user. + The user ID. readOnly: true type: string links: example: self: /api/v2/users properties: self: format: uri readOnly: true type: string readOnly: true type: object name: description: | - The name of the user. + The user name. type: string status: default: active - description: > - The status of a user. An inactive user won't have access to - resources. + description: | + The status of a user. + An inactive user can't read or write resources. enum: - active - inactive @@ -6778,60 +6506,44 @@ components: type: object securitySchemes: BasicAuthentication: - description: > - Use the HTTP Basic authentication scheme for InfluxDB `/api/v2` API - operations that support it. - + description: | + Use the HTTP Basic authentication scheme for InfluxDB `/api/v2` API operations that support it.
Username and password schemes require the following credentials: - - **username** - - **password** + + - **username** + - **password** scheme: basic type: http TokenAuthentication: - description: > - Use the [Token - authentication](#section/Authentication/TokenAuthentication) - + description: | + Use the [Token authentication](#section/Authentication/TokenAuthentication) scheme to authenticate to the InfluxDB API. - - In your API requests, send an `Authorization` header. - - For the header value, provide the word `Token` followed by a space and - an InfluxDB API token. - + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and an InfluxDB API token. The word `Token` is case-sensitive. - - ### Syntax - - `Authorization: Token YOUR_INFLUX_API_TOKEN` - - + ### Syntax `Authorization: Token INFLUX_API_TOKEN` For more information and examples, see the following: - - [`/authorizations`](#tag/Authorizations) endpoint. - - [Authorize API requests](/influxdb/v2.3/api-guide/api_intro/#authentication). - - [Manage API tokens](/influxdb/v2.3/security/tokens/). + + - [`/authorizations`](#tag/Authorizations) endpoint + - [Authorize API requests](/influxdb/v2.3/api-guide/api_intro/#authentication) + - [Manage API tokens](/influxdb/v2.3/security/tokens/) in: header name: Authorization type: apiKey info: title: InfluxDB OSS API Service version: 2.4.0 - description: > - The InfluxDB v2 API provides a programmatic interface for all interactions - with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. - + description: | + The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. This documentation is generated from the - - [InfluxDB OpenAPI - specification](https://github.com/influxdata/openapi/blob/influxdb-oss-v2.4.0/contracts/ref/oss.yml). + [InfluxDB OpenAPI specification](https://github.com/influxdata/openapi/blob/influxdb-oss-v2.4.0/contracts/ref/oss.yml). openapi: 3.0.0 paths: /api/v2: get: operationId: GetRoutes parameters: - $ref: '#/components/parameters/TraceSpan' responses: '200': description: All routes summary: List all top level routes tags: - System information endpoints /api/v2/authorizations: get: - description: > + description: | Retrieves a list of authorizations. - - To limit which authorizations are returned, pass query parameters in - your request. - + To limit which authorizations are returned, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all authorizations. - #### InfluxDB OSS - - Returns [API token](/influxdb/v2.3/reference/glossary/#token) values in authorizations. - - If the request uses an _[operator - token](/influxdb/latest/security/tokens/#operator-token)_, + - If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_, InfluxDB OSS returns authorizations for all organizations in the instance. #### Required permissions - - - InfluxDB OSS requires an _[operator - token](/influxdb/latest/security/tokens/#operator-token)_. - + - InfluxDB OSS requires an _[operator token](/influxdb/latest/security/tokens/#operator-token)_. #### Related guides - - - [View tokens](/influxdb/v2.3/security/tokens/view-tokens/). + - [View tokens](/influxdb/v2.3/security/tokens/view-tokens/) operationId: GetAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' - description: | A user ID. - Only returns authorizations scoped to this user. + Only returns authorizations scoped to the specified [user](/influxdb/v2.3/reference/glossary/#user). in: query name: userID schema: type: string - description: | A user name.
- Only returns authorizations scoped to this user. + Only returns authorizations scoped to the specified [user](/influxdb/v2.3/reference/glossary/#user). in: query name: user schema: type: string - - description: >- - An organization ID. Only returns authorizations that belong to this - organization. + - description: An organization ID. Only returns authorizations that belong to the specified [organization](/influxdb/v2.3/reference/glossary/#organization). in: query name: orgID schema: type: string - description: | An organization name. - Only returns authorizations that belong to this organization. + Only returns authorizations that belong to the specified [organization](/influxdb/v2.3/reference/glossary/#organization). in: query name: org schema: @@ -6935,52 +6635,32 @@ paths: - Authorizations - Security and access endpoints post: - description: > + description: | Creates an authorization. - - Use this endpoint to create an authorization, which generates an API - token - - with permissions to `read` or `write` to a specific resource or `type` - of resource. - - The response contains the new authorization with the generated API - token. - + Use this endpoint to create an authorization, which generates an API token + with permissions to `read` or `write` to a specific resource or `type` of resource. + The response contains the new authorization with the generated API token. Keep the following in mind when creating and updating authorizations: - - - To apply a permission to a specific resource, specify the resource - `id` field. - - - To apply a permission to all resources with the type, omit the - resource `id`. - - - To scope an authorization to a specific user, provide the `userID` - property. - + - To apply a permission to a specific resource, specify the resource `id` field. + - To apply a permission to all resources with the type, omit the resource `id`. + - To scope an authorization to a specific user, provide the `userID` property. #### Limitations - - - In InfluxDB OSS, API tokens are visible to the user who created the - authorization and to any + - In InfluxDB OSS, API tokens are visible to the user who created the authorization and to any user with an _[operator token](/influxdb/v2.3/security/tokens/#operator-token)_. - Even if an API token has `read-authorizations` permission, the token can't be used to view its authorization details. - Tokens stop working when the user who created the token is deleted. - - We recommend creating a generic user to create and manage tokens for - writing data. - + We recommend creating a generic user to create and manage tokens for writing data. #### Related guides - - - [Create a token](/influxdb/v2.3/security/tokens/create-token/). + - [Create a token](/influxdb/v2.3/security/tokens/create-token/) operationId: PostAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7122,13 +6802,9 @@ paths: /api/v2/backup/kv: get: deprecated: true - description: > - Retrieves a snapshot of metadata stored in the server's embedded KV - store. - - InfluxDB versions greater than 2.1.x don't include metadata stored in - embedded SQL; - + description: | + Retrieves a snapshot of metadata stored in the server's embedded KV store. + InfluxDB versions greater than 2.1.x don't include metadata stored in embedded SQL; avoid using this endpoint with versions greater than 2.1.x. 
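For illustration only (not from the spec), a minimal sketch of the `POST /api/v2/authorizations` request described above, creating a token whose permission is scoped to a single bucket; `ORG_ID`, `BUCKET_ID`, and `INFLUX_API_TOKEN` are placeholders:

```sh
# Create an authorization that grants read access to one bucket.
# ORG_ID, BUCKET_ID, and INFLUX_API_TOKEN are placeholders.
curl --request POST "http://localhost:8086/api/v2/authorizations" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "orgID": "ORG_ID",
    "description": "read-only access to one bucket",
    "permissions": [
      {"action": "read", "resource": {"type": "buckets", "id": "BUCKET_ID"}}
    ]
  }'
```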
operationId: GetBackupKV parameters: @@ -7144,9 +6820,7 @@ paths: default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error - summary: >- - Download snapshot of metadata stored in the server's embedded KV store. - Don't use with InfluxDB versions greater than InfluxDB 2.1.x. + summary: Download snapshot of metadata stored in the server's embedded KV store. Don't use with InfluxDB versions greater than InfluxDB 2.1.x. tags: - Backup /api/v2/backup/metadata: @@ -7154,16 +6828,12 @@ paths: operationId: GetBackupMetadata parameters: - $ref: '#/components/parameters/TraceSpan' - - description: >- - Indicates the content encoding (usually a compression algorithm) - that the client can understand. + - description: Indicates the content encoding (usually a compression algorithm) that the client can understand. in: header name: Accept-Encoding schema: default: identity - description: >- - The content coding. Use `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -7177,14 +6847,11 @@ paths: description: Snapshot of metadata headers: Content-Encoding: - description: >- - Lists any encodings (usually compression algorithms) that have - been applied to the response payload. + description: Lists any encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity - description: > - The content coding: `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: | + The content coding: `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -7200,16 +6867,12 @@ paths: operationId: GetBackupShardId parameters: - $ref: '#/components/parameters/TraceSpan' - - description: >- - Indicates the content encoding (usually a compression algorithm) - that the client can understand. + - description: Indicates the content encoding (usually a compression algorithm) that the client can understand. in: header name: Accept-Encoding schema: default: identity - description: >- - The content coding. Use `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -7221,10 +6884,7 @@ paths: schema: format: int64 type: integer - - description: >- - The earliest time [RFC3339 date/time - format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp) to - include in the snapshot. + - description: The earliest time [RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp) to include in the snapshot. examples: RFC3339: summary: RFC3339 date/time format @@ -7244,14 +6904,11 @@ paths: description: TSM snapshot. headers: Content-Encoding: - description: >- - Lists any encodings (usually compression algorithms) that have - been applied to the response payload. + description: Lists any encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity - description: > - The content coding: `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: | + The content coding: `gzip` for compressed data or `identity` for unmodified, uncompressed data. 
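A hedged sketch (not part of the spec) of requesting a gzip-encoded metadata snapshot with the `Accept-Encoding` header described above; `INFLUX_API_TOKEN` is a placeholder:

```sh
# Download a metadata snapshot, asking the server for gzip encoding.
# INFLUX_API_TOKEN is a placeholder.
curl --request GET "http://localhost:8086/api/v2/backup/metadata" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Accept-Encoding: gzip" \
  --output metadata.snapshot
```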
enum: - gzip - identity @@ -7270,33 +6927,23 @@ paths: - Backup /api/v2/buckets: get: - description: > - Retrieves a list of - [buckets](/influxdb/v2.3/reference/glossary/#bucket). - - - To limit which buckets are returned, pass query parameters in your - request. - - If no query parameters are passed, InfluxDB returns all buckets up to - the + description: | + Retrieves a list of [buckets](/influxdb/v2.3/reference/glossary/#bucket). + To limit which buckets are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all buckets up to the default `limit`. - #### Limitations - - - Paging with an `offset` greater than the number of records will result - in - + - Paging with an `offset` greater than the number of records will result in an empty list of buckets--for example: The following request is paging to the 50th record, but the user only has 10 buckets. ```sh - $ curl --request GET "INFLUX_URL/api/v2/scripts?limit=1&offset=50" + $ curl --request GET "INFLUX_URL/api/v2/buckets?limit=1&offset=50" $ { "links": { @@ -7309,7 +6956,6 @@ paths: #### Related Guides - - [Manage buckets](/influxdb/v2.3/organizations/buckets/) operationId: GetBuckets parameters: @@ -7393,8 +7039,7 @@ paths: type: system updatedAt: '2022-03-15T17:22:33.726179487Z' links: - self: >- - /api/v2/buckets?descending=false&limit=20&name=_monitoring&offset=0&orgID=ORG_ID + self: /api/v2/buckets?descending=false&limit=20&name=_monitoring&offset=0&orgID=ORG_ID schema: $ref: '#/components/schemas/Buckets' description: | @@ -7416,56 +7061,36 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request GET - "http://localhost:8086/api/v2/buckets?name=_monitoring" \ + source: | + curl --request GET "http://localhost:8086/api/v2/buckets?name=_monitoring" \ --header "Authorization: Token INFLUX_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" post: - description: > + description: | Creates a [bucket](/influxdb/v2.3/reference/glossary/#bucket) - and returns the created bucket along with metadata. The default data - [retention period](/influxdb/v2.3/reference/glossary/#retention-period) - is 30 days. - #### InfluxDB OSS - - A single InfluxDB OSS instance supports active writes or queries for - - approximately 20 buckets across all organizations at a given time. - Reading - + approximately 20 buckets across all organizations at a given time. Reading or writing to more than 20 buckets at a time can adversely affect - performance. - #### Limitations - - InfluxDB Cloud Free Plan allows users to create up to two buckets. - Exceeding the bucket quota will result in an HTTP `403` status code. - For additional information regarding InfluxDB Cloud offerings, see - - [InfluxDB Cloud - Pricing](https://www.influxdata.com/influxdb-cloud-pricing/). - + [InfluxDB Cloud Pricing](https://www.influxdata.com/influxdb-cloud-pricing/). #### Related Guides - - [Create bucket](/influxdb/v2.3/organizations/buckets/create-bucket/) - - - [Create bucket CLI - reference](/influxdb/v2.3/reference/cli/influx/bucket/create) + - [Create bucket CLI reference](/influxdb/v2.3/reference/cli/influx/bucket/create) operationId: PostBuckets parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7577,13 +7202,11 @@ paths: }' /api/v2/buckets/{bucketID}: delete: - description: > + description: | Deletes a bucket and all associated records. - #### InfluxDB Cloud - - Does the following when you send a delete request: 1. Validates the request and queues the delete. 
@@ -7592,23 +7215,16 @@ paths: #### InfluxDB OSS - - Validates the request, handles the delete synchronously, - and then responds with success or failure. - #### Limitations - - Only one bucket can be deleted per request. - #### Related Guides - - - [Delete a - bucket](/influxdb/v2.3/organizations/buckets/delete-bucket/#delete-a-bucket-in-the-influxdb-ui) + - [Delete a bucket](/influxdb/v2.3/organizations/buckets/delete-bucket/#delete-a-bucket-in-the-influxdb-ui) operationId: DeleteBucketsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7675,9 +7291,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request DELETE - "http://localhost:8086/api/v2/buckets/BUCKET_ID" \ + source: | + curl --request DELETE "http://localhost:8086/api/v2/buckets/BUCKET_ID" \ --header "Authorization: Token INFLUX_TOKEN" \ --header 'Accept: application/json' get: @@ -7755,34 +7370,23 @@ paths: tags: - Buckets patch: - description: > + description: | Updates a bucket. - Use this endpoint to update properties - (`name`, `description`, and `retentionRules`) of a bucket. - #### InfluxDB Cloud - - - Requires the `retentionRules` property in the request body. If you - don't - - provide `retentionRules`, InfluxDB responds with an HTTP `403` status - code. - + - Requires the `retentionRules` property in the request body. If you don't + provide `retentionRules`, InfluxDB responds with an HTTP `403` status code. #### InfluxDB OSS - - Doesn't require `retentionRules`. - #### Related Guides - - [Update a bucket](/influxdb/v2.3/organizations/buckets/update-bucket/) operationId: PatchBucketsID parameters: @@ -7834,17 +7438,13 @@ paths: application/json: examples: invalidJSONStringValue: - description: > - If the request body contains invalid JSON, InfluxDB returns - `invalid` - + description: | + If the request body contains invalid JSON, InfluxDB returns `invalid` with detail about the problem. summary: Invalid JSON value: code: invalid - message: >- - invalid json: invalid character '\'' looking for beginning - of value + message: 'invalid json: invalid character ''\'''' looking for beginning of value' schema: $ref: '#/components/schemas/Error' description: | @@ -7856,10 +7456,8 @@ paths: application/json: examples: invalidRetention: - summary: > - The retention policy provided exceeds the max retention for - the - + summary: | + The retention policy provided exceeds the max retention for the organization. value: code: forbidden @@ -7897,9 +7495,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request PATCH "http://localhost:8086/api/v2/buckets/BUCKET_ID - \ + source: | + curl --request PATCH "http://localhost:8086/api/v2/buckets/BUCKET_ID \ --header "Authorization: Token INFLUX_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ @@ -7915,34 +7512,20 @@ paths: }' /api/v2/buckets/{bucketID}/labels: get: - description: > + description: | Retrieves a list of all labels for a bucket. - - Labels are objects that contain `labelID`, `name`, `description`, and - `color` - + Labels are objects that contain `labelID`, `name`, `description`, and `color` key-value pairs. They may be used for grouping and filtering InfluxDB - resources. 
- - Labels are also capable of grouping across different resources--for - example, - - you can apply a label named `air_sensor` to a bucket and a task to - quickly - + Labels are also capable of grouping across different resources--for example, + you can apply a label named `air_sensor` to a bucket and a task to quickly organize resources. - #### Related guides - - - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to - retrieve and manage labels. - - - [Manage labels in the InfluxDB - UI](/influxdb/v2.3/visualize-data/labels/) + - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to retrieve and manage labels. + - [Manage labels in the InfluxDB UI](/influxdb/v2.3/visualize-data/labels/) operationId: GetBucketsIDLabels parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7989,36 +7572,24 @@ paths: tags: - Buckets post: - description: > + description: | Adds a label to a bucket and returns the new label information. - - Labels are objects that contain `labelID`, `name`, `description`, and - `color` - - key-value pairs. They may be used for grouping and filtering across one - or - + Labels are objects that contain `labelID`, `name`, `description`, and `color` + key-value pairs. They may be used for grouping and filtering across one or more kinds of **resources**--for example, you can apply a label named - `air_sensor` to a bucket and a task to quickly organize resources. - #### Limitations - - Before adding a label to a bucket, you must create the label if you haven't already. To create a label with the InfluxDB API, send a `POST` request to the [`/api/v2/labels` endpoint](#operation/PostLabels)). #### Related guides - - - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to - retrieve and manage labels. - - - [Manage labels in the InfluxDB - UI](/influxdb/v2.3/visualize-data/labels/) + - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to retrieve and manage labels. + - [Manage labels in the InfluxDB UI](/influxdb/v2.3/visualize-data/labels/) operationId: PostBucketsIDLabels parameters: - $ref: '#/components/parameters/TraceSpan' @@ -8096,9 +7667,8 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "http://localhost:8086/api/v2/buckets/BUCKETS_ID/labels \ + source: | + curl --request POST "http://localhost:8086/api/v2/buckets/BUCKETS_ID/labels \ --header "Authorization: Token INFLUX_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ @@ -8266,7 +7836,7 @@ paths: $ref: '#/components/responses/BadRequestError' examples: invalidRequest: - summary: The `userID` is missing from the request body. + summary: The user `id` is missing from the request body. value: code: invalid message: user id missing or invalid @@ -8288,10 +7858,9 @@ paths: x-codeSamples: - label: cURL lang: Shell - source: > - curl --request POST - "http://localhost:8086/api/v2/buckets/BUCKET_ID/members \ - --header "Authorization: Token INFLUX_TOKEN" \ + source: | + curl --request POST "http://localhost:8086/api/v2/buckets/BUCKET_ID/members \ + --header "Authorization: Token INFLUX_API_TOKEN" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ --data '{ @@ -8299,21 +7868,15 @@ paths: } /api/v2/buckets/{bucketID}/members/{userID}: delete: - description: > + description: | Removes a member from a bucket. - - Use this endpoint to remove a user's member privileges from a bucket. - This - + Use this endpoint to remove a user's member privileges from a bucket. 
This removes the user's `read` and `write` permissions for the bucket. #### Related guides - [Manage users](/influxdb/v2.3/users/) - - [Manage members](/influxdb/v2.3/organizations/members/) operationId: DeleteBucketsIDMembersID parameters: - $ref: '#/components/parameters/TraceSpan' - description: The ID of the member to remove. in: path name: userID required: true schema: type: string - description: The bucket ID. in: path name: bucketID required: true schema: type: string responses: '204': description: Member removed default: content: application/json: schema: $ref: '#/components/schemas/Error' description: Unexpected error summary: Remove a member from a bucket tags: - Buckets /api/v2/buckets/{bucketID}/owners: get: + description: | + Retrieves a list of all [owners](/influxdb/v2.3/reference/glossary/#owner) + for a bucket. + + Bucket owners have permission to delete buckets and remove user and member + permissions from the bucket. + + #### InfluxDB Cloud + + - Doesn't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. + + #### Limitations + + - Owner permissions are separate from API token permissions. + - Owner permissions are used in the context of the InfluxDB UI. + + #### Required permissions + + - `read-orgs INFLUX_ORG_ID` + + `INFLUX_ORG_ID` is the ID of the organization that you want to retrieve a + list of owners for. + + #### Related endpoints + + - [Authorizations](#tag/Authorizations) + + #### Related guides + + - [Manage users](/influxdb/v2.3/users/) operationId: GetBucketsIDOwners parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The bucket ID. + - description: | + The ID of the bucket to retrieve owners for. in: path name: bucketID required: true schema: type: string responses: '200': content: application/json: + examples: + successResponse: + value: + links: + self: /api/v2/buckets/BUCKET_ID/owners + users: + - id: d88d182d91b0950f + links: + self: /api/v2/users/d88d182d91b0950f + name: example-owner + role: owner + status: active schema: $ref: '#/components/schemas/ResourceOwners' - description: A list of bucket owners + description: | + Success. + The response body contains a list of all owners for the bucket. + '400': + $ref: '#/components/responses/BadRequestError' + '401': + $ref: '#/components/responses/AuthorizationError' + '404': + $ref: '#/components/responses/ResourceNotFoundError' + '500': + $ref: '#/components/responses/InternalServerError' default: content: application/json: schema: $ref: '#/components/schemas/Error' description: Unexpected error summary: List all owners of a bucket tags: - Buckets post: + description: | + Adds an owner to a bucket and returns the [owners](/influxdb/v2.3/reference/glossary/#owner) + with role and user detail. + + Use this endpoint to create a _resource owner_ for the bucket. + Bucket owners have permission to delete buckets and remove user and member + permissions from the bucket. + + #### InfluxDB Cloud + + - Doesn't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. + + #### Limitations + + - Owner permissions are separate from API token permissions. + - Owner permissions are used in the context of the InfluxDB UI. + + #### Required permissions + + - `write-orgs INFLUX_ORG_ID` + `INFLUX_ORG_ID` is the ID of the organization that you want to add an owner for. + + #### Related endpoints + + - [Authorizations](#tag/Authorizations) + + #### Related guides + + - [Manage users](/influxdb/v2.3/users/) operationId: PostBucketsIDOwners parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The bucket ID. + - description: | + The ID of the bucket to add an owner for.
in: path name: bucketID required: true schema: type: string requestBody: content: application/json: + examples: + addOwnerRequest: + value: + id: d88d182d91b0950f + name: example-user schema: $ref: '#/components/schemas/AddResourceMemberRequestBody' - description: User to add as owner + description: A user to add as an owner for the bucket. required: true responses: '201': content: application/json: schema: $ref: '#/components/schemas/ResourceOwner' - description: Success. The user is an owner of the bucket + description: | + Created. + The bucket `owner` role is assigned to the user. + The response body contains the resource owner with + role and user detail. + '400': + $ref: '#/components/responses/BadRequestError' + examples: + invalidRequest: + summary: The user `id` is missing from the request body. + value: + code: invalid + message: user id missing or invalid + '401': + $ref: '#/components/responses/AuthorizationError' + '404': + $ref: '#/components/responses/ResourceNotFoundError' + '500': + $ref: '#/components/responses/InternalServerError' default: content: application/json: schema: $ref: '#/components/schemas/Error' description: Unexpected error summary: Add an owner to a bucket tags: - Buckets + x-codeSamples: + - label: cURL + lang: Shell + source: | + curl --request POST "http://localhost:8086/api/v2/buckets/BUCKET_ID/owners" \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data '{ + "id": "09cfb87051cbe000" + }' /api/v2/buckets/{bucketID}/owners/{userID}: delete: + description: | + Removes an owner from a bucket. + + Use this endpoint to remove a user's `owner` role for a bucket. + + #### InfluxDB Cloud + + - Doesn't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. + + #### Limitations + + - Owner permissions are separate from API token permissions. + - Owner permissions are used in the context of the InfluxDB UI. + + #### Required permissions + + - `write-orgs INFLUX_ORG_ID` + `INFLUX_ORG_ID` is the ID of the organization that you want to remove an owner + from. + + #### Related endpoints + + - [Authorizations](#tag/Authorizations) + + #### Related guides + + - [Manage users](/influxdb/v2.3/users/) operationId: DeleteBucketsIDOwnersID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the owner to remove. + - description: | + The ID of the owner to remove. in: path name: userID required: true schema: type: string - - description: The bucket ID. + - description: | + The ID of the bucket to remove an owner from. in: path name: bucketID required: true schema: type: string responses: '204': - description: Owner removed + description: | + Success. + The user is no longer an owner of the bucket. + '401': + $ref: '#/components/responses/AuthorizationError' + '404': + $ref: '#/components/responses/ResourceNotFoundError' + '500': + $ref: '#/components/responses/InternalServerError' default: content: application/json: schema: $ref: '#/components/schemas/Error' description: Unexpected error summary: Remove an owner from a bucket tags: - Buckets @@ -8764,21 +8488,15 @@ - Checks /api/v2/config: get: - description: > + description: | Returns the active runtime configuration of the InfluxDB instance. - - In InfluxDB v2.2+, use this endpoint to view your active runtime - configuration, - + In InfluxDB v2.2+, use this endpoint to view your active runtime configuration, including flags and environment variables.
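As an illustrative aside (not from the spec), a minimal sketch of the `GET /api/v2/config` request; `INFLUX_API_TOKEN` is a placeholder:

```sh
# Retrieve the active runtime configuration of the InfluxDB instance.
# INFLUX_API_TOKEN is a placeholder.
curl --request GET "http://localhost:8086/api/v2/config" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Accept: application/json"
```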
- #### Related guides - - - [View your runtime server - configuration](/influxdb/v2.3/reference/config-options/#view-your-runtime-server-configuration) + - [View your runtime server configuration](/influxdb/v2.3/reference/config-options/#view-your-runtime-server-configuration) operationId: GetConfig parameters: - $ref: '#/components/parameters/TraceSpan' @@ -8788,11 +8506,9 @@ paths: application/json: schema: $ref: '#/components/schemas/Config' - description: > + description: | Success. - - The response body contains the active runtime configuration of the - InfluxDB instance. + The response body contains the active runtime configuration of the InfluxDB instance. '401': $ref: '#/components/responses/GeneralServerError' default: @@ -8809,9 +8525,7 @@ paths: - $ref: '#/components/parameters/Offset' - $ref: '#/components/parameters/Limit' - $ref: '#/components/parameters/Descending' - - description: >- - A user identifier. Returns only dashboards where this user has the - `owner` role. + - description: A user identifier. Returns only dashboards where this user has the `owner` role. in: query name: owner schema: @@ -8825,9 +8539,7 @@ paths: - CreatedAt - UpdatedAt type: string - - description: >- - A list of dashboard identifiers. Returns only the listed dashboards. - If both `id` and `owner` are specified, only `id` is used. + - description: A list of dashboard identifiers. Returns only the listed dashboards. If both `id` and `owner` are specified, only `id` is used. in: query name: id schema: @@ -8977,9 +8689,7 @@ paths: properties: cells: $ref: '#/components/schemas/CellWithViewProperties' - description: >- - optional, when provided will replace all existing cells with - the cells provided + description: optional, when provided will replace all existing cells with the cells provided description: description: optional, when provided will replace the description type: string @@ -9054,9 +8764,7 @@ paths: - Cells - Dashboards put: - description: >- - Replaces all cells in a dashboard. This is used primarily to update the - positional information of all cells. + description: Replaces all cells in a dashboard. This is used primarily to update the positional information of all cells. operationId: PutDashboardsIDCells parameters: - $ref: '#/components/parameters/TraceSpan' @@ -9132,9 +8840,7 @@ paths: - Cells - Dashboards patch: - description: >- - Updates the non positional information related to a cell. Updates to a - single cell's positional data could cause grid conflicts. + description: Updates the non positional information related to a cell. Updates to a single cell's positional data could cause grid conflicts. operationId: PatchDashboardsIDCellsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -9772,40 +9478,29 @@ paths: - DBRPs /debug/pprof/all: get: - description: > - Collects samples and returns reports for the following [Go runtime - profiles](https://pkg.go.dev/runtime/pprof): - + description: | + Collects samples and returns reports for the following [Go runtime profiles](https://pkg.go.dev/runtime/pprof): - **allocs**: All past memory allocations - - - **block**: Stack traces that led to blocking on synchronization - primitives - + - **block**: Stack traces that led to blocking on synchronization primitives - **cpu**: (Optional) Program counters sampled from the executing stack. Include by passing the `cpu` query parameter with a [duration](/influxdb/v2.3/reference/glossary/#duration) value. 
Equivalent to the report from [`GET /debug/pprof/profile?seconds=NUMBER_OF_SECONDS`](#operation/GetDebugPprofProfile). - **goroutine**: All current goroutines - - **heap**: Memory allocations for live objects - - **mutex**: Holders of contended mutexes - - - **threadcreate**: Stack traces that led to the creation of new OS - threads + - **threadcreate**: Stack traces that led to the creation of new OS threads operationId: GetDebugPprofAllProfiles parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - Collects and returns CPU profiling data for the specified - [duration](/influxdb/v2.3/reference/glossary/#duration). + - description: | + Collects and returns CPU profiling data for the specified [duration](/influxdb/v2.3/reference/glossary/#duration). in: query name: cpu schema: externalDocs: description: InfluxDB duration - url: >- - https://docs.influxdata.com/influxdb/latest/reference/glossary/#duration + url: https://docs.influxdata.com/influxdb/latest/reference/glossary/#duration format: duration type: string responses: @@ -9813,11 +9508,9 @@ paths: content: application/octet-stream: schema: - description: > + description: | GZIP compressed TAR file (`.tar.gz`) that contains - - [Go runtime profile](https://pkg.go.dev/runtime/pprof) - reports. + [Go runtime profile](https://pkg.go.dev/runtime/pprof) reports. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -9836,32 +9529,21 @@ paths: x-codeSamples: - label: 'Shell: Get all profiles' lang: Shell - source: > - # Download and extract a `tar.gz` of all profiles after 10 seconds - of CPU sampling. - + source: | + # Download and extract a `tar.gz` of all profiles after 10 seconds of CPU sampling. curl "http://localhost:8086/debug/pprof/all?cpu=10s" | tar -xz - # x profiles/cpu.pb.gz - # x profiles/goroutine.pb.gz - # x profiles/block.pb.gz - # x profiles/mutex.pb.gz - # x profiles/heap.pb.gz - # x profiles/allocs.pb.gz - # x profiles/threadcreate.pb.gz - # Analyze a profile. - go tool pprof profiles/heap.pb.gz - label: 'Shell: Get all profiles except CPU' lang: Shell @@ -9882,29 +9564,19 @@ paths: go tool pprof profiles/heap.pb.gz /debug/pprof/allocs: get: - description: > - Returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - of - + description: | + Returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report of all past memory allocations. - **allocs** is the same as the **heap** profile, - but changes the default [pprof](https://pkg.go.dev/runtime/pprof) - display to __-alloc_space__, - - the total number of bytes allocated since the program began (including - garbage-collected bytes). + the total number of bytes allocated since the program began (including garbage-collected bytes). operationId: GetDebugPprofAllocs parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - - `0`: (Default) Return the report as a gzip-compressed protocol - buffer. - - - `1`: Return a response body with the report formatted as - human-readable text. + - description: | + - `0`: (Default) Return the report as a gzip-compressed protocol buffer. + - `1`: Return a response body with the report formatted as human-readable text. The report contains comments that translate addresses to function names and line numbers for debugging. `debug=1` is mutually exclusive with the `seconds` query parameter. 
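The `debug` parameter described above isn't exercised by the endpoint's `go tool pprof` sample. A quick sketch, assuming the same localhost address used elsewhere in this spec:

```sh
# Sketch: fetch the allocs report as human-readable text (debug=1)
# instead of the default gzip-compressed protocol buffer.
curl "http://localhost:8086/debug/pprof/allocs?debug=1"
```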
@@ -9930,9 +9602,8 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -9949,15 +9620,10 @@ paths: url: https://pkg.go.dev/net/http/pprof format: Go runtime profile type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. - - If debug is enabled (`?debug=1`), response body contains a - human-readable profile. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. + If debug is enabled (`?debug=1`), response body contains a human-readable profile. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -9969,42 +9635,29 @@ paths: x-codeSamples: - label: 'Shell: go tool pprof' lang: Shell - source: > + source: | # Analyze the profile in interactive mode. - go tool pprof http://localhost:8086/debug/pprof/allocs - # `pprof` returns the following prompt: - - # Entering interactive mode (type "help" for commands, "o" for - options) - + # Entering interactive mode (type "help" for commands, "o" for options) # (pprof) - # At the prompt, get the top N memory allocations. - (pprof) top10 /debug/pprof/block: get: - description: > - Collects samples and returns a [Go runtime - profile](https://pkg.go.dev/runtime/pprof) - - report of stack traces that led to blocking on synchronization - primitives. + description: | + Collects samples and returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) + report of stack traces that led to blocking on synchronization primitives. operationId: GetDebugPprofBlock parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - - `0`: (Default) Return the report as a gzip-compressed protocol - buffer. - - - `1`: Return a response body with the report formatted as - human-readable text. + - description: | + - `0`: (Default) Return the report as a gzip-compressed protocol buffer. + - `1`: Return a response body with the report formatted as human-readable text. The report contains comments that translate addresses to function names and line numbers for debugging. `debug=1` is mutually exclusive with the `seconds` query parameter. @@ -10030,9 +9683,8 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -10049,15 +9701,10 @@ paths: url: https://pkg.go.dev/net/http/pprof format: Go runtime profile type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. - - If debug is enabled (`?debug=1`), response body contains a - human-readable profile. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. 
+ If debug is enabled (`?debug=1`), response body contains a human-readable profile. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -10069,24 +9716,17 @@ paths: x-codeSamples: - label: 'Shell: go tool pprof' lang: Shell - source: > + source: | # Analyze the profile in interactive mode. - go tool pprof http://localhost:8086/debug/pprof/block - # `pprof` returns the following prompt: - - # Entering interactive mode (type "help" for commands, "o" for - options) - + # Entering interactive mode (type "help" for commands, "o" for options) # (pprof) - # At the prompt, get the top N entries. - (pprof) top10 /debug/pprof/cmdline: get: @@ -10113,18 +9753,14 @@ paths: - System information endpoints /debug/pprof/goroutine: get: - description: > - Collects statistics and returns a [Go runtime - profile](https://pkg.go.dev/runtime/pprof) - + description: | + Collects statistics and returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report of all current goroutines. operationId: GetDebugPprofGoroutine parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - - `0`: (Default) Return the report as a gzip-compressed protocol - buffer. - + - description: | + - `0`: (Default) Return the report as a gzip-compressed protocol buffer. - `1`: Return a response body with the report formatted as human-readable text with comments that translate addresses to function names and line numbers for debugging. @@ -10152,9 +9788,8 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -10171,15 +9806,10 @@ paths: url: https://pkg.go.dev/net/http/pprof format: Go runtime profile type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. - - If debug is enabled (`?debug=1`), response body contains a - human-readable profile. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. + If debug is enabled (`?debug=1`), response body contains a human-readable profile. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -10191,46 +9821,32 @@ paths: x-codeSamples: - label: 'Shell: go tool pprof' lang: Shell - source: > + source: | # Analyze the profile in interactive mode. - go tool pprof http://localhost:8086/debug/pprof/goroutine - # `pprof` returns the following prompt: - - # Entering interactive mode (type "help" for commands, "o" for - options) - + # Entering interactive mode (type "help" for commands, "o" for options) # (pprof) - # At the prompt, get the top N entries. - (pprof) top10 /debug/pprof/heap: get: - description: > - Collects statistics and returns a [Go runtime - profile](https://pkg.go.dev/runtime/pprof) - + description: | + Collects statistics and returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report of memory allocations for live objects. - To run **garbage collection** before sampling, - pass the `gc` query parameter with a value of `1`. 
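As a usage sketch for the `gc` parameter just described (the localhost address and output file name are placeholders):

```sh
# Sketch: run garbage collection before sampling the heap (gc=1),
# save the protocol buffer report, then analyze it locally.
curl -o heap.pb.gz "http://localhost:8086/debug/pprof/heap?gc=1"
go tool pprof heap.pb.gz
```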
operationId: GetDebugPprofHeap parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - - `0`: (Default) Return the report as a gzip-compressed protocol - buffer. - - - `1`: Return a response body with the report formatted as - human-readable text. + - description: | + - `0`: (Default) Return the report as a gzip-compressed protocol buffer. + - `1`: Return a response body with the report formatted as human-readable text. The report contains comments that translate addresses to function names and line numbers for debugging. `debug=1` is mutually exclusive with the `seconds` query parameter. @@ -10267,9 +9883,8 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -10290,15 +9905,10 @@ paths: url: https://pkg.go.dev/net/http/pprof format: Go runtime profile type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. - - If debug is enabled (`?debug=1`), response body contains a - human-readable profile. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. + If debug is enabled (`?debug=1`), response body contains a human-readable profile. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -10310,53 +9920,35 @@ paths: x-codeSamples: - label: 'Shell: go tool pprof' lang: Shell - source: > + source: | # Analyze the profile in interactive mode. - go tool pprof http://localhost:8086/debug/pprof/heap - # `pprof` returns the following prompt: - - # Entering interactive mode (type "help" for commands, "o" for - options) - + # Entering interactive mode (type "help" for commands, "o" for options) # (pprof) - # At the prompt, get the top N memory-intensive nodes. - (pprof) top10 - # pprof displays the list: - # Showing nodes accounting for 142.46MB, 85.43% of 166.75MB total - # Dropped 895 nodes (cum <= 0.83MB) - # Showing top 10 nodes out of 143 /debug/pprof/mutex: get: - description: > - Collects statistics and returns a [Go runtime - profile](https://pkg.go.dev/runtime/pprof) report of - + description: | + Collects statistics and returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report of lock contentions. - - The profile contains stack traces of holders of contended mutual - exclusions (mutexes). + The profile contains stack traces of holders of contended mutual exclusions (mutexes). operationId: GetDebugPprofMutex parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - - `0`: (Default) Return the report as a gzip-compressed protocol - buffer. - - - `1`: Return a response body with the report formatted as - human-readable text. + - description: | + - `0`: (Default) Return the report as a gzip-compressed protocol buffer. + - `1`: Return a response body with the report formatted as human-readable text. The report contains comments that translate addresses to function names and line numbers for debugging. `debug=1` is mutually exclusive with the `seconds` query parameter. 
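Besides the interactive session shown in the endpoint's code sample, `go tool pprof` can also print a report directly; a sketch, assuming the localhost address used elsewhere in this spec:

```sh
# Sketch: print the top holders of contended mutexes non-interactively.
go tool pprof -top http://localhost:8086/debug/pprof/mutex
```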
@@ -10382,9 +9974,8 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -10401,15 +9992,10 @@ paths: url: https://pkg.go.dev/net/http/pprof format: Go runtime profile type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. - - If debug is enabled (`?debug=1`), response body contains a - human-readable profile. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. + If debug is enabled (`?debug=1`), response body contains a human-readable profile. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -10421,31 +10007,22 @@ paths: x-codeSamples: - label: 'Shell: go tool pprof' lang: Shell - source: > + source: | # Analyze the profile in interactive mode. - go tool pprof http://localhost:8086/debug/pprof/mutex - # `pprof` returns the following prompt: - - # Entering interactive mode (type "help" for commands, "o" for - options) - + # Entering interactive mode (type "help" for commands, "o" for options) # (pprof) - # At the prompt, get the top N entries. - (pprof) top10 /debug/pprof/profile: get: - description: > - Collects statistics and returns a [Go runtime - profile](https://pkg.go.dev/runtime/pprof) - + description: | + Collects statistics and returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report of program counters on the executing stack. operationId: GetDebugPprofProfile parameters: @@ -10461,20 +10038,16 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof format: binary type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -10501,20 +10074,15 @@ paths: (pprof) top10 /debug/pprof/threadcreate: get: - description: > - Collects statistics and returns a [Go runtime - profile](https://pkg.go.dev/runtime/pprof) - + description: | + Collects statistics and returns a [Go runtime profile](https://pkg.go.dev/runtime/pprof) report of stack traces that led to the creation of new OS threads. operationId: GetDebugPprofThreadCreate parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > - - `0`: (Default) Return the report as a gzip-compressed protocol - buffer. - - - `1`: Return a response body with the report formatted as - human-readable text. + - description: | + - `0`: (Default) Return the report as a gzip-compressed protocol buffer. 
+ - `1`: Return a response body with the report formatted as human-readable text. The report contains comments that translate addresses to function names and line numbers for debugging. `debug=1` is mutually exclusive with the `seconds` query parameter. @@ -10540,9 +10108,8 @@ paths: content: application/octet-stream: schema: - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - in protocol buffer format. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report in protocol buffer format. externalDocs: description: Golang pprof package url: https://pkg.go.dev/net/http/pprof @@ -10563,15 +10130,10 @@ paths: url: https://pkg.go.dev/net/http/pprof format: Go runtime profile type: string - description: > - [Go runtime profile](https://pkg.go.dev/runtime/pprof) report - compatible - - with [pprof](https://github.com/google/pprof) analysis and - visualization tools. - - If debug is enabled (`?debug=1`), response body contains a - human-readable profile. + description: | + [Go runtime profile](https://pkg.go.dev/runtime/pprof) report compatible + with [pprof](https://github.com/google/pprof) analysis and visualization tools. + If debug is enabled (`?debug=1`), response body contains a human-readable profile. default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error @@ -10583,30 +10145,22 @@ paths: x-codeSamples: - label: 'Shell: go tool pprof' lang: Shell - source: > + source: | # Analyze the profile in interactive mode. - go tool pprof http://localhost:8086/debug/pprof/threadcreate - # `pprof` returns the following prompt: - - # Entering interactive mode (type "help" for commands, "o" for - options) - + # Entering interactive mode (type "help" for commands, "o" for options) # (pprof) - # At the prompt, get the top N entries. - (pprof) top10 /debug/pprof/trace: get: - description: > - Collects profile data and returns trace execution events for the current - program. + description: | + Collects profile data and returns trace execution events for the current program. operationId: GetDebugPprofTrace parameters: - $ref: '#/components/parameters/TraceSpan' @@ -10650,17 +10204,13 @@ paths: go tool trace ./trace /api/v2/delete: post: - description: > + description: | Deletes data from a bucket. - - Use this endpoint to delete points from a bucket in a specified time - range. - + Use this endpoint to delete points from a bucket in a specified time range. #### InfluxDB Cloud - - Does the following when you send a delete request: 1. Validates the request and queues the delete. @@ -10669,103 +10219,74 @@ paths: #### InfluxDB OSS - - Validates the request, handles the delete synchronously, and then responds with success or failure. #### Required permissions - - `write-buckets` or `write-bucket BUCKET_ID`. `BUCKET_ID` is the ID of the destination bucket. #### Rate limits (with InfluxDB Cloud) - `write` rate limits apply. - - For more information, see [limits and adjustable - quotas](/influxdb/cloud/account-management/limits/). - + For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - - [Delete data](/influxdb/v2.3/write-data/delete-data/). - - - Learn how to use [delete predicate - syntax](/influxdb/v2.3/reference/syntax/delete-predicate/). 
- - - Learn how InfluxDB handles [deleted - tags](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/schema/measurementtagkeys/) + - [Delete data](/influxdb/v2.3/write-data/delete-data/) + - Learn how to use [delete predicate syntax](/influxdb/v2.3/reference/syntax/delete-predicate/). + - Learn how InfluxDB handles [deleted tags](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/schema/measurementtagkeys/) and [deleted fields](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/schema/measurementfieldkeys/). operationId: PostDelete parameters: - $ref: '#/components/parameters/TraceSpan' - - description: > + - description: | The organization to delete data from. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Deletes data from the bucket in the organization associated with - the authorization (API token). - + - Deletes data from the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: org schema: description: The organization name or ID. type: string - - description: > + - description: | The name or ID of the bucket to delete data from. - - If you pass both `bucket` and `bucketID`, `bucketID` takes - precedence. + If you pass both `bucket` and `bucketID`, `bucketID` takes precedence. in: query name: bucket schema: description: The bucket name or ID. type: string - - description: > + - description: | The ID of the organization to delete data from. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Deletes data from the bucket in the organization associated with - the authorization (API token). - + - Deletes data from the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: orgID schema: description: The organization ID. type: string - - description: > + - description: | The ID of the bucket to delete data from. - - If you pass both `bucket` and `bucketID`, `bucketID` takes - precedence. + If you pass both `bucket` and `bucketID`, `bucketID` takes precedence. in: query name: bucketID schema: @@ -10776,61 +10297,38 @@ paths: application/json: schema: $ref: '#/components/schemas/DeletePredicateRequest' - description: > + description: | Time range parameters and an optional **delete predicate expression**. - To select points to delete within the specified time range, pass a - - **delete predicate expression** in the `predicate` property of the - request body. - - If you don't pass a `predicate`, InfluxDB deletes all data with - timestamps - + **delete predicate expression** in the `predicate` property of the request body. + If you don't pass a `predicate`, InfluxDB deletes all data with timestamps in the specified time range. - #### Related guides - - - [Delete data](/influxdb/v2.3/write-data/delete-data/). - - - Learn how to use [delete predicate - syntax](/influxdb/v2.3/reference/syntax/delete-predicate/). + - [Delete data](/influxdb/v2.3/write-data/delete-data/) + - Learn how to use [delete predicate syntax](/influxdb/v2.3/reference/syntax/delete-predicate/). required: true responses: '204': - description: > + description: | Success. - #### InfluxDB Cloud - - Validated and queued the request. + - Handles the delete asynchronously - the deletion might not have completed yet. 
-
-          - Handles the delete asynchronously - the deletion might not have
-          completed yet.
-
-
-          An HTTP `2xx` status code acknowledges that the write or delete is
-          queued.
-
-          To ensure that InfluxDB Cloud handles writes and deletes in the
-          order you request them,
-
+          An HTTP `2xx` status code acknowledges that the write or delete is queued.
+          To ensure that InfluxDB Cloud handles writes and deletes in the order you request them,
          wait for a response before you send the next request.

          Because writes are asynchronous, data might not yet be written
          when you receive the response.

          #### InfluxDB OSS

          - Deleted the data.
        '400':
          content:
            application/json:
              examples:
                orgNotFound:
                  summary: Organization not found
                  value:
                    code: invalid
                    message: 'failed to decode request body: organization not found'
              schema:
                $ref: '#/components/schemas/Error'
-          description: >
+          description: |
            Bad request.
            The response body contains detail about the error.

            #### InfluxDB OSS

-            - Returns this error if `org` or `orgID` doesn't match an
-            organization.
+            - Returns this error if `org` or `orgID` doesn't match an organization.
        '401':
          $ref: '#/components/responses/AuthorizationError'
        '404':
@@ -10869,9 +10363,8 @@ paths:
      x-codeSamples:
        - label: cURL
          lang: Shell
-          source: >
-            curl --request POST
-            INFLUX_URL/api/v2/delete?org=INFLUX_ORG&bucket=INFLUX_BUCKET \
+          source: |
+            curl --request POST "INFLUX_URL/api/v2/delete?org=INFLUX_ORG&bucket=INFLUX_BUCKET" \
              --header 'Authorization: Token INFLUX_API_TOKEN' \
              --header 'Content-Type: application/json' \
              --data '{
@@ -11088,9 +10581,7 @@ paths:
          application/json:
            schema:
              $ref: '#/components/schemas/UserResponse'
-        description: >-
-          Success. The response body contains the currently authenticated
-          user.
+        description: Success. The response body contains the currently authenticated user.
      '401':
        $ref: '#/components/responses/AuthorizationError'
      '500':
        $ref: '#/components/responses/InternalServerError'
  /api/v2/me/password:
    put:
      description: |
+        Updates the password for the signed-in [user](/influxdb/v2.3/reference/glossary/#user).
+
+        This endpoint represents the third step in the following three-step process to let a
+        user with a user session update their password:
+        1. Pass the user's [Basic authentication credentials](#section/Authentication/BasicAuthentication) to the `POST /api/v2/signin`
+           endpoint to create a user session and generate a session cookie.
+        2. From the response in the first step, extract the session cookie from the `Set-Cookie` response header.
+        3. Pass the following in a request to the `PUT /api/v2/me/password` endpoint:
+           - The session cookie from the second step, sent in the `Cookie` request header
+           - The `Authorization Basic` header with the user's _Basic authentication_ credentials
+           - `{"password": "NEW_PASSWORD"}` in the request body
+
        #### InfluxDB Cloud

-        InfluxDB Cloud doesn't support changing user passwords through the API.
-        Use the InfluxDB Cloud user interface to update your password.
+        - Doesn't allow you to manage passwords through the API.
+          Use the InfluxDB Cloud user interface (UI) to update your password. 
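A sketch of the three-step flow described above, using curl's cookie handling (InfluxDB OSS only; the host, usernames, and passwords are placeholders):

```sh
# Step 1: sign in with Basic auth; curl saves the session cookie
# from the Set-Cookie response header into a cookie jar.
curl --request POST "http://localhost:8086/api/v2/signin" \
  --user "USERNAME:OLD_PASSWORD" \
  --cookie-jar ./influxdb-session.txt

# Steps 2-3: send the session cookie and the Basic credentials
# along with the new password in the request body.
curl --request PUT "http://localhost:8086/api/v2/me/password" \
  --user "USERNAME:OLD_PASSWORD" \
  --cookie ./influxdb-session.txt \
  --header "Content-Type: application/json" \
  --data '{"password": "NEW_PASSWORD"}'
```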
+
+        #### Related endpoints
+
+        - [Signin](#tag/Signin)
+        - [Signout](#tag/Signout)
+        - [Users](#tag/Users)
+
+        #### Related guides
+
+        - [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/)
+        - [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/)
      operationId: PutMePassword
      parameters:
        - $ref: '#/components/parameters/TraceSpan'
+        - description: |
+            The user session cookie for the
+            [user](/influxdb/v2.3/reference/glossary/#user)
+            signed in with [Basic authentication credentials](#section/Authentication/BasicAuthentication).
+
+            #### Related guides
+
+            - [Manage users](/influxdb/v2.3/users/)
+          example: influxdb-oss-session=19aaaZZZGOvP2GGryXVT2qYftlFKu3bIopurM6AGFow1yF1abhtOlbHfsc-d8gozZFC_6WxmlQIAwLMW5xs523w==
+          in: cookie
+          name: influxdb-oss-session
+          required: true
+          schema:
+            type: string
      requestBody:
        content:
          application/json:
@@ -11119,13 +10647,20 @@ paths:
        required: true
      responses:
        '204':
-          description: Success. The password was updated.
+          description: Success. The password is updated.
        '400':
-          description: >
+          description: |
            Bad request.
-
-            InfluxDB Cloud doesn't support changing passwords through the API
-            and always responds with this status.
+
+            #### InfluxDB Cloud
+
+            - Doesn't allow you to manage passwords through the API; always responds with this status.
+
+            #### InfluxDB OSS
+
+            - Doesn't understand a value passed in the request.
+        '401':
+          $ref: '#/components/responses/AuthorizationError'
        default:
          content:
            application/json:
@@ -11139,24 +10674,16 @@ paths:
        - Users
  /metrics:
    get:
-      description: >
+      description: |
        Returns metrics about the workload performance of an InfluxDB instance.
        Use this endpoint to get performance, resource, and usage metrics.

        #### Related guides

-        - For the list of metrics categories, see [InfluxDB OSS
-          metrics](/influxdb/v2.3/reference/internals/metrics/).
-
-        - Learn how to use InfluxDB to [scrape Prometheus
-          metrics](/influxdb/v2.3/write-data/developer-tools/scrape-prometheus-metrics/).
-
-        - Learn how InfluxDB [parses the Prometheus exposition
-          format](/influxdb/v2.3/reference/prometheus-metrics/).
+        - For the list of metrics categories, see [InfluxDB OSS metrics](/influxdb/v2.3/reference/internals/metrics/).
+        - Learn how to use InfluxDB to [scrape Prometheus metrics](/influxdb/v2.3/write-data/developer-tools/scrape-prometheus-metrics/).
+        - Learn how InfluxDB [parses the Prometheus exposition format](/influxdb/v2.3/reference/prometheus-metrics/).
      operationId: GetMetrics
      parameters:
        - $ref: '#/components/parameters/TraceSpan'
@@ -11167,50 +10694,30 @@ paths:
          examples:
            expositionResponse:
              summary: Metrics in plain text
-              value: >
+              value: |
                # HELP go_threads Number of OS threads created. 
-
-                # TYPE go_threads gauge
-
-                go_threads 19
-
-
-                # HELP http_api_request_duration_seconds Time taken to
-                respond to HTTP request
-
+                # TYPE go_threads gauge
+                go_threads 19
+
+                # HELP http_api_request_duration_seconds Time taken to respond to HTTP request
                # TYPE http_api_request_duration_seconds histogram
-
-                http_api_request_duration_seconds_bucket{handler="platform",method="GET",path="/:fallback_path",response_code="200",status="2XX",user_agent="curl",le="0.005"}
-                4
-
-                http_api_request_duration_seconds_bucket{handler="platform",method="GET",path="/:fallback_path",response_code="200",status="2XX",user_agent="curl",le="0.01"}
-                4
-
-                http_api_request_duration_seconds_bucket{handler="platform",method="GET",path="/:fallback_path",response_code="200",status="2XX",user_agent="curl",le="0.025"}
-                5
+                http_api_request_duration_seconds_bucket{handler="platform",method="GET",path="/:fallback_path",response_code="200",status="2XX",user_agent="curl",le="0.005"} 4
+                http_api_request_duration_seconds_bucket{handler="platform",method="GET",path="/:fallback_path",response_code="200",status="2XX",user_agent="curl",le="0.01"} 4
+                http_api_request_duration_seconds_bucket{handler="platform",method="GET",path="/:fallback_path",response_code="200",status="2XX",user_agent="curl",le="0.025"} 5
          schema:
            externalDocs:
              description: Prometheus exposition formats
              url: https://prometheus.io/docs/instrumenting/exposition_formats
            format: Prometheus text-based exposition
            type: string
-      description: >
+      description: |
        Success. The response body contains metrics in
-
-        [Prometheus plain-text exposition
-        format](https://prometheus.io/docs/instrumenting/exposition_formats)
-
-        Metrics contain a name, an optional set of key-value pairs, and a
-        value.
-
+        [Prometheus plain-text exposition format](https://prometheus.io/docs/instrumenting/exposition_formats).
+        Metrics contain a name, an optional set of key-value pairs, and a value.
        The following descriptors precede each metric:

        - `HELP`: description of the metric
-
-        - `TYPE`: [Prometheus metric
-          type](https://prometheus.io/docs/concepts/metric_types/) (`counter`,
-          `gauge`, `histogram`, or `summary`)
+        - `TYPE`: [Prometheus metric type](https://prometheus.io/docs/concepts/metric_types/) (`counter`, `gauge`, `histogram`, or `summary`)
      default:
        $ref: '#/components/responses/GeneralServerError'
        description: Unexpected error
@@ -11226,9 +10733,7 @@ paths:
        - $ref: '#/components/parameters/TraceSpan'
        - $ref: '#/components/parameters/Offset'
        - $ref: '#/components/parameters/Limit'
-        - description: >-
-            Only show notification endpoints that belong to specific
-            organization ID.
+        - description: Only show notification endpoints that belong to a specific organization ID.
          in: query
          name: orgID
          required: true
@@ -11510,9 +11015,7 @@ paths:
        - $ref: '#/components/parameters/TraceSpan'
        - $ref: '#/components/parameters/Offset'
        - $ref: '#/components/parameters/Limit'
-        - description: >-
-            Only show notification rules that belong to a specific organization
-            ID.
+        - description: Only show notification rules that belong to a specific organization ID.
          in: query
          name: orgID
          required: true
@@ -11523,9 +11026,7 @@ paths:
          name: checkID
          schema:
            type: string
-        - description: >-
-            Only return notification rules that "would match" statuses which
-            contain the tag key value pairs provided.
+        - description: Only return notification rules that "would match" statuses which contain the tag key value pairs provided. 
in: query name: tag schema: @@ -11842,29 +11343,19 @@ paths: - Rules /api/v2/orgs: get: - description: > - Retrieves a list of - [organizations](/influxdb/v2.3/reference/glossary/#organization/). - - - To limit which organizations are returned, pass query parameters in your - request. - - If no query parameters are passed, InfluxDB returns all organizations up - to the default `limit`. + description: | + Retrieves a list of [organizations](/influxdb/v2.3/reference/glossary/#organization/). + To limit which organizations are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all organizations up to the default `limit`. #### InfluxDB Cloud - - - Only returns the organization that owns the token passed in the - request. - + - Only returns the organization that owns the token passed in the request. #### Related guides - - - [View organizations](/influxdb/v2.3/organizations/view-orgs/). + - [View organizations](/influxdb/v2.3/organizations/view-orgs/) operationId: GetOrgs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -11935,22 +11426,16 @@ paths: - Organizations - Security and access endpoints post: - description: > - Creates an - [organization](/influxdb/v2.3/reference/glossary/#organization) - + description: | + Creates an [organization](/influxdb/v2.3/reference/glossary/#organization) and returns the newly created organization. - #### InfluxDB Cloud - - Doesn't allow you to use this endpoint to create organizations. - #### Related guides - - [Manage organizations](/influxdb/v2.3/organizations) operationId: PostOrgs parameters: @@ -12049,7 +11534,7 @@ paths: #### Related guides - - [Delete organization](/influxdb/v2.3/organizations/delete-orgs/) + - [Delete organizations](/influxdb/v2.3/organizations/delete-orgs/) operationId: DeleteOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -12108,7 +11593,7 @@ paths: #### Related guides - - [View organization](/influxdb/v2.3/organizations/view-orgs/) + - [View organizations](/influxdb/v2.3/organizations/view-orgs/) operationId: GetOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -12158,40 +11643,26 @@ paths: - Organizations - Security and access endpoints patch: - description: > + description: | Updates an organization. - Use this endpoint to update properties - (`name`, `description`) of an organization. - Updating an organization’s name affects all resources that reference the - organization by name, including the following: - - Queries - - Dashboards - - Tasks - - Telegraf configurations - - Templates - - If you change an organization name, be sure to update the organization - name - + If you change an organization name, be sure to update the organization name in these resources as well. - #### Related Guides - - [Update an organization](/influxdb/v2.3/organizations/update-org/) operationId: PatchOrgsID parameters: @@ -12236,51 +11707,35 @@ paths: - Organizations /api/v2/orgs/{orgID}/members: get: - description: > + description: | Retrieves a list of all users that belong to an organization. - InfluxDB [users](/influxdb/v2.3/reference/glossary/#user) have - permission to access InfluxDB. - [Members](/influxdb/v2.3/reference/glossary/#member) are users - within the organization. - #### InfluxDB Cloud - - Doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. #### Limitations - - Member permissions are separate from API token permissions. 
- Member permissions are used in the context of the InfluxDB UI.
-
-      #### Required permissions
-
-      - `read-orgs INFLUX_ORG_ID`
-
-      `INFLUX_ORG_ID` is the ID of the organization that you retrieve a list
-      of
-
+        - Member permissions are used in the context of the InfluxDB UI.
+
+        #### Required permissions
+
+        - `read-orgs INFLUX_ORG_ID`
+
+        `INFLUX_ORG_ID` is the ID of the organization that you retrieve a list of
        members from.

        #### Related guides

        - [Manage users](/influxdb/v2.3/users/)
        - [Manage members](/influxdb/v2.3/organizations/members/)
      operationId: GetOrgsIDMembers
      parameters:
@@ -12316,11 +11771,9 @@ paths:
                  status: active
          schema:
            $ref: '#/components/schemas/ResourceMembers'
-        description: >
+        description: |
          Success.
-
-          The response body contains a list of all users within the
-          organization.
+          The response body contains a list of all users within the organization.
        '400':
          $ref: '#/components/responses/BadRequestError'
        '401':
          $ref: '#/components/responses/AuthorizationError'
@@ -12353,48 +11806,33 @@ paths:
        - Organizations
        - Security and access endpoints
    post:
-      description: >
-        Adds a user to an organization.
-
+      description: |
+        Adds a user to an organization.
        InfluxDB [users](/influxdb/v2.3/reference/glossary/#user) have
        permission to access InfluxDB.
        [Members](/influxdb/v2.3/reference/glossary/#member) are users
        within the organization.

        #### InfluxDB Cloud

        - Doesn't use `owner` and `member` roles.
          Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions.

        #### Limitations

        - Member permissions are separate from API token permissions.
        - Member permissions are used in the context of the InfluxDB UI.

        #### Required permissions

        - `write-orgs INFLUX_ORG_ID`

-        `INFLUX_ORG_ID` is the ID of the organization that you want add a member
-        to.
-
+        `INFLUX_ORG_ID` is the ID of the organization that you want to add a member to.

        #### Related guides

        - [Manage users](/influxdb/v2.3/users/)
        - [Manage members](/influxdb/v2.3/organizations/members/)
      operationId: PostOrgsIDMembers
      parameters:
@@ -12458,9 +11896,8 @@ paths:
      x-codeSamples:
        - label: cURL
          lang: Shell
-          source: >
-            curl --request POST
-            "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/members \
+          source: |
+            curl --request POST "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/members" \
              --header "Authorization: Token INFLUX_API_TOKEN" \
              --header "Accept: application/json" \
              --header "Content-Type: application/json" \
              --data '{
                "id": "09cfb87051cbe000"
              }'
  /api/v2/orgs/{orgID}/members/{userID}:
    delete:
-      description: >
+      description: |
        Removes a member from an organization.
-
-        Use this endpoint to remove a user's member privileges from a bucket.
-        This
-
+        Use this endpoint to remove a user's member privileges from an organization. This
        removes the user's `read` and `write` permissions from the organization.

        #### InfluxDB Cloud

        - Doesn't use `owner` and `member` roles. 
Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions.

        #### Limitations

        - Member permissions are separate from API token permissions.
        - Member permissions are used in the context of the InfluxDB UI.

        #### Required permissions

        - `write-orgs INFLUX_ORG_ID`

-        `INFLUX_ORG_ID` is the ID of the organization that you want to remove an
-        owner from.
+        `INFLUX_ORG_ID` is the ID of the organization that you want to remove a member from.

        #### Related guides

        - [Manage members](/influxdb/v2.3/organizations/members/)
      operationId: DeleteOrgsIDMembersID
      parameters:
@@ -12546,25 +11970,19 @@ paths:
        - Security and access endpoints
  /api/v2/orgs/{orgID}/owners:
    get:
-      description: >
+      description: |
        Retrieves a list of all owners of an organization.

        #### InfluxDB Cloud

        - Doesn't use `owner` and `member` roles.
          Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions.

        #### Required permissions

        - `read-orgs INFLUX_ORG_ID`

-        `INFLUX_ORG_ID` is the ID of the organization that you want to retrieve
-        a
-
+        `INFLUX_ORG_ID` is the ID of the organization that you want to retrieve a
        list of owners from.
      operationId: GetOrgsIDOwners
      parameters:
@@ -12612,32 +12030,24 @@ paths:
        - Organizations
        - Security and access endpoints
    post:
-      description: >
+      description: |
        Adds an owner to an organization.
        Use this endpoint to assign the organization `owner` role to a user.

        #### InfluxDB Cloud

        - Doesn't use `owner` and `member` roles.
          Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions.

        #### Required permissions

        - `write-orgs INFLUX_ORG_ID`

-        `INFLUX_ORG_ID` is the ID of the organization that you want add an owner
-        for.
-
+        `INFLUX_ORG_ID` is the ID of the organization that you want to add an owner for.

        #### Related endpoints

        - [Authorizations](#tag/Authorizations)
      operationId: PostOrgsIDOwners
      parameters:
@@ -12693,9 +12103,8 @@ paths:
      x-codeSamples:
        - label: cURL
          lang: Shell
-          source: >
-            curl --request POST
-            "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/owners \
+          source: |
+            curl --request POST "http://localhost:8086/api/v2/orgs/INFLUX_ORG_ID/owners" \
              --header "Authorization: Token INFLUX_API_TOKEN" \
              --header "Accept: application/json" \
              --header "Content-Type: application/json" \
              --data '{
                "id": "09cfb87051cbe000"
              }'
  /api/v2/orgs/{orgID}/owners/{userID}:
    delete:
-      description: >
+      description: |
        Removes an [owner](/influxdb/v2.3/reference/glossary/#owner) from
        the organization.
        Organization owners have permission to delete organizations and remove user and member
        permissions from the organization.

        #### InfluxDB Cloud

        - Doesn't use `owner` and `member` roles.
          Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions.

        #### Limitations

        - Owner permissions are separate from API token permissions.
        - Owner permissions are used in the context of the InfluxDB UI.

        #### Required permissions

        - `write-orgs INFLUX_ORG_ID`

-        `INFLUX_ORG_ID` is the ID of the organization that you want remove an
-        owner
-
+        `INFLUX_ORG_ID` is the ID of the organization that you want to remove an owner
        from.

        #### Related endpoints

        - [Authorizations](#tag/Authorizations)
      operationId: DeleteOrgsIDOwnersID
      parameters:
@@ -12943,44 +12337,29 @@ paths:
        - Ping
  /api/v2/query:
    post:
-      description: >
+      description: |
        Retrieves data from buckets.
        Use this endpoint to send a Flux query request and retrieve data from a bucket.

        #### Rate limits (with InfluxDB Cloud)

        - `read` rate limits apply.
-
-        For more information, see [limits and adjustable
-        quotas](/influxdb/cloud/account-management/limits/).
-
+          For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/).

        #### Related guides
- - - [Get started with - Flux](https://docs.influxdata.com/flux/v0.x/get-started/) + - [Query with the InfluxDB API](/influxdb/v2.3/query-data/execute-queries/influx-api/) + - [Get started with Flux](https://docs.influxdata.com/flux/v0.x/get-started/) operationId: PostQuery parameters: - $ref: '#/components/parameters/TraceSpan' - - description: >- - The content encoding (usually a compression algorithm) that the - client can understand. + - description: The content encoding (usually a compression algorithm) that the client can understand. in: header name: Accept-Encoding schema: default: identity - description: >- - The content coding. Use `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -12992,43 +12371,31 @@ paths: - application/json - application/vnd.flux type: string - - description: > + - description: | The name or ID of the organization executing the query. - #### InfluxDB Cloud - - Doesn't use `org` or `orgID`. - - - Queries the bucket in the organization associated with the - authorization (API token). - + - Queries the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: org schema: type: string - - description: > + - description: | The ID of the organization executing the query. - #### InfluxDB Cloud - - Doesn't use `org` or `orgID`. - - - Queries the bucket in the organization associated with the - authorization (API token). - + - Queries the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. in: query name: orgID @@ -13051,27 +12418,21 @@ paths: '200': content: application/csv: - example: > + example: | result,table,_start,_stop,_time,region,host,_value - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 schema: type: string description: Success. The response body contains query results. headers: Content-Encoding: - description: >- - Lists encodings (usually compression algorithms) that have been - applied to the response payload. + description: Lists encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity - description: > - The content coding: `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: | + The content coding: `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -13092,17 +12453,13 @@ paths: message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if `org` or `orgID` doesn't match an - organization. + - Returns this error if `org` or `orgID` doesn't match an organization. '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -13120,9 +12477,7 @@ paths: - doesn't return this error. headers: Retry-After: - description: >- - Non-negative decimal integer indicating seconds to wait before - retrying the request. 
+            description: Non-negative decimal integer indicating seconds to wait before retrying the request.
            schema:
              format: int32
              type: integer
@@ -13147,13 +12502,10 @@ paths:
                |> filter(fn: (r) => r._measurement == "example-measurement")'
  /api/v2/query/analyze:
    post:
-      description: >
-        Analyzes a [Flux query](https://docs.influxdata.com/flux/v0.x/) for
-        syntax
-
+      description: |
+        Analyzes a [Flux query](https://docs.influxdata.com/flux/v0.x/) for syntax
        errors and returns the list of errors.

        In the following sample query, `from()` is missing the property key.

        ```json
        {
          "query": "from(: \"iot_center\")\

        ...

        }
        ```

        If you pass this in a request to the `/api/v2/query/analyze` endpoint,
-        InfluxDB returns an `errors` list that contains an error object for the
-        missing key.
+        InfluxDB returns an `errors` list that contains an error object for the missing key.

        #### Limitations

        - The endpoint doesn't validate values in the query--for example:

          The following sample query has correct syntax, but contains an incorrect `from()` property key:
@@ -13208,22 +12556,17 @@ paths:
          application/json:
            examples:
              missingQueryPropertyKey:
-                description: >
-                  Returns an error object if the Flux query is missing a
-                  property key.
+                description: |
+                  Returns an error object if the Flux query is missing a property key.
+
+                  The following sample query is missing the _`bucket`_ property key:

-                  The following sample query is missing the _`bucket`_
-                  property key:
-
-                  ```json
-                  {
-                  "query": "from(: \"iot_center\")\
-
-                  ...
-
-                  }
-                  ```
+                  ```json
+                  {
+                    "query": "from(: \"iot_center\")\
+                    ...
+                  }
+                  ```
                summary: Missing property key error
                value:
                  errors:
                    - ...
                      message: missing property key
            schema:
              $ref: '#/components/schemas/AnalyzeQueryResponse'
-        description: >
+        description: |
          Success.
          The response body contains the list of `errors`.
-
-          If the query syntax is valid, the endpoint returns an empty `errors`
-          list.
+          If the query syntax is valid, the endpoint returns an empty `errors` list.
      '400':
        content:
          application/json:
            examples:
              invalidJSONStringValue:
-                description: >-
-                  If the request body contains invalid JSON, returns `invalid`
-                  and problem detail.
+                description: If the request body contains invalid JSON, returns `invalid` and problem detail.
                summary: Invalid JSON
                value:
                  code: invalid
-                  message: >-
-                    invalid json: invalid character '\'' looking for beginning
-                    of value
+                  message: 'invalid json: invalid character ''\'''' looking for beginning of value'
            schema:
              $ref: '#/components/schemas/Error'
        description: |
...
      '500':
        content:
          application/json:
            examples:
              emptyJSONObject:
-                description: >
-                  If the request body contains an empty JSON object, returns
-                  `internal error`.
+                description: |
+                  If the request body contains an empty JSON object, returns `internal error`.
                summary: Empty JSON object in request body
                value:
                  code: internal error
...
            EOF
  /api/v2/query/ast:
    post:
-      description: >
-        Analyzes a Flux query and returns a complete package source [Abstract
-        Syntax
-
+      description: |
+        Analyzes a Flux query and returns a complete package source [Abstract Syntax
        Tree (AST)](/influxdb/v2.3/reference/glossary/#abstract-syntax-tree-ast)
-
        for the query. 
The AST illustrates how the query is - distributed - + A Flux query AST provides a semantic, tree-like representation with contextual + information about the query. The AST illustrates how the query is distributed into different components for execution. - #### Limitations - - The endpoint doesn't validate values in the query--for example: The following sample Flux query has correct syntax, but contains an incorrect `from()` property key: @@ -13363,6 +12685,7 @@ paths: ``` The following code sample shows how to pass the query as JSON in the request body: + ```json { "query": "from(foo: \"iot_center\")\ |> range(start: -90d)\ @@ -13542,9 +12865,7 @@ paths: end: column: 47 line: 1 - source: >- - from(bucket: "example-bucket") |> - range(start: -5m) + source: 'from(bucket: "example-bucket") |> range(start: -5m)' start: column: 1 line: 1 @@ -13555,9 +12876,7 @@ paths: end: column: 108 line: 1 - source: >- - fn: (r) => r._measurement == - "example-measurement" + source: 'fn: (r) => r._measurement == "example-measurement"' start: column: 58 line: 1 @@ -13577,9 +12896,7 @@ paths: end: column: 108 line: 1 - source: >- - fn: (r) => r._measurement == - "example-measurement" + source: 'fn: (r) => r._measurement == "example-measurement"' start: column: 58 line: 1 @@ -13643,9 +12960,7 @@ paths: end: column: 108 line: 1 - source: >- - (r) => r._measurement == - "example-measurement" + source: (r) => r._measurement == "example-measurement" start: column: 62 line: 1 @@ -13688,9 +13003,7 @@ paths: end: column: 109 line: 1 - source: >- - filter(fn: (r) => r._measurement == - "example-measurement") + source: 'filter(fn: (r) => r._measurement == "example-measurement")' start: column: 51 line: 1 @@ -13699,10 +13012,7 @@ paths: end: column: 109 line: 1 - source: >- - from(bucket: "example-bucket") |> - range(start: -5m) |> filter(fn: (r) => - r._measurement == "example-measurement") + source: 'from(bucket: "example-bucket") |> range(start: -5m) |> filter(fn: (r) => r._measurement == "example-measurement")' start: column: 1 line: 1 @@ -13711,10 +13021,7 @@ paths: end: column: 109 line: 1 - source: >- - from(bucket: "example-bucket") |> range(start: - -5m) |> filter(fn: (r) => r._measurement == - "example-measurement") + source: 'from(bucket: "example-bucket") |> range(start: -5m) |> filter(fn: (r) => r._measurement == "example-measurement")' start: column: 1 line: 1 @@ -13724,10 +13031,7 @@ paths: end: column: 109 line: 1 - source: >- - from(bucket: "example-bucket") |> range(start: - -5m) |> filter(fn: (r) => r._measurement == - "example-measurement") + source: 'from(bucket: "example-bucket") |> range(start: -5m) |> filter(fn: (r) => r._measurement == "example-measurement")' start: column: 1 line: 1 @@ -13738,20 +13042,16 @@ paths: type: Package schema: $ref: '#/components/schemas/ASTResponse' - description: > + description: | Success. - - The response body contains an Abstract Syntax Tree (AST) of the Flux - query. + The response body contains an Abstract Syntax Tree (AST) of the Flux query. '400': content: application/json: examples: invalidASTValue: - description: > - If the request body contains a missing property key in - `from()`, - + description: | + If the request body contains a missing property key in `from()`, returns `invalid` and problem detail. summary: Invalid AST value: @@ -13796,48 +13096,31 @@ paths: EOL /api/v2/query/suggestions: get: - description: > + description: | Retrieves a list of Flux query suggestions. 
Each suggestion contains a
-
-        [Flux
-        function](https://docs.influxdata.com/flux/v0.x/stdlib/all-functions/)
-
+        [Flux function](https://docs.influxdata.com/flux/v0.x/stdlib/all-functions/)
        name and parameters.
-
-        Use this endpoint to retrieve a list of Flux query suggestions used in
-        the
-
-        InfluxDB Flux Query Builder. Helper function names have an underscore
-        (`_`)
-
+        Use this endpoint to retrieve a list of Flux query suggestions used in the
+        InfluxDB Flux Query Builder. Helper function names have an underscore (`_`)
        prefix and aren't meant to be used directly in queries--for example:

        - **Recommended**: Use `top(n, columns=["_value"], tables=<-)` to sort on a
          column and keep the top n records instead of `_sortLimit_`. `top` uses
          the `_sortLimit` helper function.

        #### Limitations

        - Using `/api/v2/query/suggestions/` (note the trailing slash) with cURL
          results in an HTTP `301 Moved Permanently` status code. Use
          `/api/v2/query/suggestions` without a trailing slash.

        - When writing a query, avoid using `_functionName()` helper functions
          exposed by this endpoint.

        #### Related Guides

-        - [List of all Flux
-          functions](/influxdb/v2.3/flux/v0.x/stdlib/all-functions/).
+        - [List of all Flux functions](/influxdb/v2.3/flux/v0.x/stdlib/all-functions/)
      operationId: GetQuerySuggestions
      parameters:
        - $ref: '#/components/parameters/TraceSpan'
@@ -14431,26 +13714,20 @@ paths:
                  tables: stream
          schema:
            $ref: '#/components/schemas/FluxSuggestions'
-        description: >
+        description: |
          Success.
-
-          The response body contains a list of Flux query
-          suggestions--function
-
+          The response body contains a list of Flux query suggestions--function
          names used in the Flux Query Builder autocomplete suggestions.
      '301':
        content:
          text/html:
            examples:
              movedPermanently:
-                description: >
-                  The URL has been permanently moved. Use
-                  `/api/v2/query/suggestions`.
+                description: |
+                  The URL has been permanently moved. Use `/api/v2/query/suggestions`.
                summary: Invalid URL
-                value: >
-                  Moved
-                  Permanently
+                value: |
+                  Moved Permanently
            schema:
              properties:
                body:
@@ -14473,45 +13750,30 @@ paths:
      x-codeSamples:
        - label: cURL
          lang: Shell
-          source: >
-            curl --request GET
-            "https://cloud2.influxdata.com/api/v2/query/suggestions" \
+          source: |
+            curl --request GET "https://cloud2.influxdata.com/api/v2/query/suggestions" \
              --header "Accept: application/json" \
              --header "Authorization: Token INFLUX_API_TOKEN"
  /api/v2/query/suggestions/{name}:
    get:
-      description: >
-        Retrieves a query suggestion that contains the name and parameters of
-        the
-
+      description: |
+        Retrieves a query suggestion that contains the name and parameters of the
        requested function.
-
-        Use this endpoint to pass a branching suggestion (a Flux function name)
-        and
-
+        Use this endpoint to pass a branching suggestion (a Flux function name) and
        retrieve the parameters of the requested function.

        #### Limitations

        - Use `/api/v2/query/suggestions/{name}` (without a trailing slash).
-
-        `/api/v2/query/suggestions/{name}/` (note the trailing slash) results in
-        a
-
+          `/api/v2/query/suggestions/{name}/` (note the trailing slash) results in an
        HTTP `301 Moved Permanently` status.

        - The function `name` must exist and must be spelled correctly.

        #### Related Guides

-        - [List of all Flux
-          functions](/influxdb/v2.3/flux/v0.x/stdlib/all-functions/). 
+ - [List of all Flux functions](/influxdb/v2.3/flux/v0.x/stdlib/all-functions/)
 operationId: GetQuerySuggestionsName
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
@@ -14561,9 +13823,8 @@ paths:
 x-codeSamples:
 - label: cURL
 lang: Shell
- source: >
- curl --request GET
- "https://cloud2.influxdata.com/api/v2/query/suggestions/sum/" \
+ source: |
+ curl --request GET "https://cloud2.influxdata.com/api/v2/query/suggestions/sum" \
 --header "Accept: application/json" \
 --header "Authorization: Token INFLUX_API_TOKEN"
 /ready:
 get:
@@ -14983,19 +14244,14 @@ paths:
 operationId: PostRestoreKV
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
- - description: >
- The value tells InfluxDB what compression is applied to the line
- protocol in the request payload.
-
- To make an API request with a GZIP payload, send `Content-Encoding:
- gzip` as a request header.
+ - description: |
+ The value tells InfluxDB what compression is applied to the request payload.
+ To make an API request with a GZIP payload, send `Content-Encoding: gzip` as a request header.
 in: header
 name: Content-Encoding
 schema:
 default: identity
- description: >-
- The content coding. Use `gzip` for compressed data or `identity`
- for unmodified, uncompressed data.
+ description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data.
 enum:
 - gzip
 - identity
@@ -15022,9 +14278,7 @@ paths:
 schema:
 properties:
 token:
- description: >-
- token is the root token for the instance after restore
- (this is overwritten during the restore)
+ description: token is the root token for the instance after restore (this is overwritten during the restore)
 type: string
 type: object
 description: KV store successfully overwritten.
@@ -15041,19 +14295,14 @@ paths:
 operationId: PostRestoreShardId
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
- - description: >
- The value tells InfluxDB what compression is applied to the line
- protocol in the request payload.
-
- To make an API request with a GZIP payload, send `Content-Encoding:
- gzip` as a request header.
+ - description: |
+ The value tells InfluxDB what compression is applied to the request payload.
+ To make an API request with a GZIP payload, send `Content-Encoding: gzip` as a request header.
 in: header
 name: Content-Encoding
 schema:
 default: identity
- description: >-
- Specifies that the line protocol in the body is encoded with gzip
- or not encoded with identity.
+ description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data.
 enum:
 - gzip
 - identity
@@ -15093,19 +14342,14 @@ paths:
 operationId: PostRestoreSQL
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
- - description: >
- The value tells InfluxDB what compression is applied to the line
- protocol in the request payload.
-
- To make an API request with a GZIP payload, send `Content-Encoding:
- gzip` as a request header.
+ - description: |
+ The value tells InfluxDB what compression is applied to the request payload.
+ To make an API request with a GZIP payload, send `Content-Encoding: gzip` as a request header.
 in: header
 name: Content-Encoding
 schema:
 default: identity
- description: >-
- Specifies that the line protocol in the body is encoded with gzip
- or not encoded with identity.
+ description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data.
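To illustrate the `Content-Encoding: gzip` convention these restore endpoints share, here is a hedged sketch of uploading a compressed KV snapshot -- the snapshot path, token, host, and the assumption that `PostRestoreKV` lives at `/api/v2/restore/kv` are all placeholders to adjust for your deployment:

```sh
# Compress a previously exported KV snapshot and stream it to the server.
# backup/kv.bolt, INFLUX_OP_TOKEN, and localhost:8086 are placeholders.
gzip --stdout backup/kv.bolt | \
  curl --request POST "http://localhost:8086/api/v2/restore/kv" \
    --header "Authorization: Token INFLUX_OP_TOKEN" \
    --header "Content-Encoding: gzip" \
    --data-binary @-
```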
enum: - gzip - identity @@ -15131,9 +14375,7 @@ paths: default: $ref: '#/components/responses/GeneralServerError' description: Unexpected error - summary: >- - Overwrite the embedded SQL store on the server with a backed-up - snapshot. + summary: Overwrite the embedded SQL store on the server with a backed-up snapshot. tags: - Restore /api/v2/scrapers: @@ -15146,9 +14388,7 @@ paths: name: name schema: type: string - - description: >- - List of scraper target IDs to return. If both `id` and `owner` are - specified, only `id` is used. + - description: List of scraper target IDs to return. If both `id` and `owner` are specified, only `id` is used. in: query name: id schema: @@ -15559,9 +14799,7 @@ paths: - Scraper Targets /api/v2/setup: get: - description: >- - Returns `true` if no default user, organization, or bucket has been - created. + description: Returns `true` if no default user, organization, or bucket has been created. operationId: GetSetup parameters: - $ref: '#/components/parameters/TraceSpan' @@ -15602,27 +14840,67 @@ paths: - Setup /api/v2/signin: post: - description: >- - Authenticates ***Basic Auth*** credentials for a user. If successful, - creates a new UI session for the user. + description: | + Authenticates [Basic authentication credentials](#section/Authentication/BasicAuthentication) + for a [user](/influxdb/v2.3/reference/glossary/#user), + and then, if successful, generates a user session. + + To authenticate a user, pass the HTTP `Authorization` header with the + `Basic` scheme and the base64-encoded username and password--for example: + + ```sh + Authorization: Basic USERNAME:PASSWORD + ``` + + In InfluxDB Cloud, the username is the email address the user signed up with. + + _Note that many HTTP clients provide a Basic authentication option that + accepts the `USERNAME:PASSWORD` syntax and encodes the credentials before + sending the request. + To learn more about HTTP authentication, see + [Mozilla Developer Network (MDN) Web Docs, HTTP authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication)._ + + If authentication is successful, InfluxDB creates a new session for the user + and then returns the session cookie in the `Set-Cookie` response header. + User sessions exist only in memory. + They expire within ten minutes and during restarts of the InfluxDB instance. + + #### User sessions with authorizations + + - In InfluxDB Cloud, a user session inherits all the user's permissions for + the organization. + - In InfluxDB OSS, a user session inherits all the user's permissions for all + the organizations that the user belongs to. + + #### Related endpoints + + - [Signout](#tag/Signout) operationId: PostSignin parameters: - $ref: '#/components/parameters/TraceSpan' responses: '204': - description: Success. User authenticated. + description: | + Success. + The user is authenticated. + The `Set-Cookie` response header contains the session cookie. '401': content: application/json: schema: $ref: '#/components/schemas/Error' - description: Unauthorized access. + description: | + Unauthorized. + This error may be caused by one of the following problems: + - The user doesn't have access. + - The user passed incorrect credentials in the request. + - The credentials are formatted incorrectly in the request. '403': content: application/json: schema: $ref: '#/components/schemas/Error' - description: User account is disabled. + description: Forbidden. The user account is disabled. 
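As a sketch of the `Basic` scheme described above, you can encode the credentials yourself instead of relying on the client -- `USERNAME`, `PASSWORD`, and the localhost URL are placeholders:

```sh
# Base64-encode the credentials by hand and start a session.
# Most clients (including curl --user, shown in the sample below) do this for you.
curl --request POST "http://localhost:8086/api/v2/signin" \
  --header "Authorization: Basic $(echo -n 'USERNAME:PASSWORD' | base64)"
```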
default: content: application/json: @@ -15634,6 +14912,12 @@ paths: summary: Create a user session. tags: - Signin + x-codeSamples: + - label: 'cURL: signin with --user option encoding' + lang: Shell + source: | + curl --request POST http://localhost:8086/api/v2/signin \ + --user "USERNAME:PASSWORD" /api/v2/signout: post: description: Expires the current UI session for the user. @@ -15906,21 +15190,15 @@ paths: required: true schema: type: string - - description: > + - description: | The stack name. - Finds stack `events` with this name and returns the stacks. - Repeatable. - To filter for more than one stack name, - repeat this parameter with each name--for example: - - - - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&name=project-stack-0&name=project-stack-1` + - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&name=project-stack-0&name=project-stack-1` examples: findStackByName: summary: Find stacks with the event name @@ -15929,21 +15207,15 @@ paths: name: name schema: type: string - - description: > + - description: | The stack ID. - Only returns stacks with this ID. - Repeatable. - To filter for more than one stack ID, - repeat this parameter with each ID--for example: - - - - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&stackID=09bd87cd33be3000&stackID=09bef35081fe3000` + - `http://localhost:8086/api/v2/stacks?&orgID=INFLUX_ORG_ID&stackID=09bd87cd33be3000&stackID=09bef35081fe3000` examples: findStackByID: summary: Find a stack with the ID @@ -15972,29 +15244,21 @@ paths: summary: The orgID query parameter is missing value: code: invalid - message: >- - organization id[""] is invalid: id must have a length of - 16 bytes + message: 'organization id[""] is invalid: id must have a length of 16 bytes' orgProvidedNotFound: - summary: >- - The org or orgID passed doesn't own the token passed in the - header + summary: The org or orgID passed doesn't own the token passed in the header value: code: invalid message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if an incorrect value is passed for `org` or - `orgID`. + - Returns this error if an incorrect value is passed for `org` or `orgID`. '401': $ref: '#/components/responses/AuthorizationError' '500': @@ -16009,13 +15273,10 @@ paths: tags: - Templates post: - description: > + description: | Creates or initializes a stack. - - Use this endpoint to _manually_ initialize a new stack with the - following - + Use this endpoint to _manually_ initialize a new stack with the following optional information: - Stack name @@ -16023,23 +15284,16 @@ paths: - URLs for template manifest files To automatically create a stack when applying templates, - use the [/api/v2/templates/apply endpoint](#operation/ApplyTemplate). - #### Required permissions - - `write` permission for the organization - #### Related guides - - [InfluxDB stacks](/influxdb/v2.3/influxdb-templates/stacks/) - - - [Use InfluxDB - templates](/influxdb/v2.3/influxdb-templates/use/#apply-templates-to-an-influxdb-instance) + - [Use InfluxDB templates](/influxdb/v2.3/influxdb-templates/use/#apply-templates-to-an-influxdb-instance) operationId: CreateStack requestBody: content: @@ -16074,16 +15328,12 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. 
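A minimal sketch of the repeatable `name` filter documented above, reusing the placeholder org ID and stack names from the example URL:

```sh
# List stacks whose events match either of two names by repeating the parameter.
curl --request GET "http://localhost:8086/api/v2/stacks?orgID=INFLUX_ORG_ID&name=project-stack-0&name=project-stack-1" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```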
- The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -16236,57 +15486,57 @@ paths: - Templates /api/v2/tasks: get: - description: > - Retrieves a list of [tasks](/influxdb/v2.3/process-data/). + description: | + Retrieves a list of [tasks](/influxdb/v2.3/reference/glossary/#task). + To limit which tasks are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all tasks up to the default `limit`. - To limit which tasks are returned, pass query parameters in your - request. + #### Related guide - If no query parameters are passed, InfluxDB returns all tasks up to the - default `limit`. + - [Process data with InfluxDB tasks](/influxdb/v2.3/process-data/) operationId: GetTasks parameters: - $ref: '#/components/parameters/TraceSpan' - description: | - Task name. - Only returns tasks with this name. + A [task](/influxdb/v2.3/reference/glossary/#task) name. + Only returns tasks with the specified name. Different tasks may have the same name. in: query name: name schema: type: string - description: | - Task ID. - Only returns tasks created after this task. + A [task](/influxdb/v2.3/reference/glossary/#task) ID. + Only returns tasks created after the specified task. in: query name: after schema: type: string - description: | - User ID. - Only returns tasks owned by this user. + A [user](/influxdb/v2.3/reference/glossary/#user) ID. + Only returns tasks owned by the specified user. in: query name: user schema: type: string - description: | - Organization name. - Only returns tasks owned by this organization. + An [organization](/influxdb/v2.3/reference/glossary/#organization) name. + Only returns tasks owned by the specified organization. in: query name: org schema: type: string - description: | - Organization ID. - Only returns tasks owned by this organization. + An [organization](/influxdb/v2.3/reference/glossary/#organization) ID. + Only returns tasks owned by the specified organization. in: query name: orgID schema: type: string - description: | - Task status (`active` or `inactive`). - Only returns tasks with this status. + A [task](/influxdb/v2.3/reference/glossary/#task) status. + Only returns tasks that have the specified status (`active` or `inactive`). in: query name: status schema: @@ -16295,8 +15545,12 @@ paths: - inactive type: string - description: | - Limits the number of tasks returned. - The minimum is `1`, the maximum is `500`, and the default is `100`. + The maximum number of [tasks](/influxdb/v2.3/reference/glossary/#task) to return. + Default is `100`. + The minimum is `1` and the maximum is `500`. + + To reduce the payload size, combine _`type=basic`_ and _`limit`_ (see _Request samples_). + For more information about the `basic` response, see the _`type`_ parameter. in: query name: limit schema: @@ -16304,15 +15558,12 @@ paths: maximum: 500 minimum: 1 type: integer - - description: > - Task type (`basic` or `system`). - - - The default (`system`) response contains all the metadata properties - for tasks. - - To reduce the payload size, pass `basic` to omit some task - properties (`flux`, `createdAt`, `updatedAt`) from the response. 
+ - description: | + A [task](/influxdb/v2.3/reference/glossary/#task) type (`basic` or `system`). + Default is `system`. + Specifies the level of detail for tasks in the response. + The default (`system`) response contains all the metadata properties for tasks. + To reduce the response size, pass `basic` to omit some task properties (`flux`, `createdAt`, `updatedAt`). in: query name: type required: false @@ -16328,7 +15579,10 @@ paths: application/json: examples: basicTypeTaskOutput: - description: Task fields returned with `?type=basic` + description: | + A sample response body for the `?type=basic` parameter. + `type=basic` omits some task fields (`createdAt` and `updatedAt`) + and field values (`org`, `flux`) in the response. summary: Basic output value: links: @@ -16353,7 +15607,9 @@ paths: ownerID: 0772396d1f411000 status: active systemTypeTaskOutput: - description: Task fields returned with `?type=system` + description: | + A sample response body for the `?type=system` parameter. + `type=system` returns all task fields. summary: System output value: links: @@ -16409,22 +15665,15 @@ paths: --header 'Content-Type: application/json' \ --header 'Authorization: Token INFLUX_API_TOKEN' post: - description: > - Creates a [task](/influxdb/v2.3/process-data/) and returns the created - task. - + description: | + Creates a [task](/influxdb/v2.3/reference/glossary/#task) and returns the task. #### Related guides - - [Get started with tasks](/influxdb/v2.3/process-data/get-started/) - - [Create a task](/influxdb/v2.3/process-data/manage-tasks/create-task/) - - [Common tasks](/influxdb/v2.3/process-data/common-tasks/) - - - [Task configuration - options](/influxdb/v2.3/process-data/task-options/) + - [Task configuration options](/influxdb/v2.3/process-data/task-options/) operationId: PostTasks parameters: - $ref: '#/components/parameters/TraceSpan' @@ -16441,9 +15690,7 @@ paths: application/json: schema: $ref: '#/components/schemas/Task' - description: >- - Success. The response body contains a `tasks` list with the new - task. + description: Success. The response body contains a `tasks` list with the task. '400': content: application/json: @@ -16454,25 +15701,19 @@ paths: code: invalid message: 'failed to decode request: missing flux' orgProvidedNotFound: - summary: >- - The org or orgID passed doesn't own the token passed in the - header + summary: The org or orgID passed doesn't own the token passed in the header value: code: invalid message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/Error' - description: > + description: | Bad request. - The response body contains detail about the error. - #### InfluxDB OSS - - - Returns this error if an incorrect value is passed for `org` or - `orgID`. + - Returns this error if an incorrect value is passed for `org` or `orgID`. '401': $ref: '#/components/responses/AuthorizationError' '500': @@ -16507,19 +15748,13 @@ paths: EOF /api/v2/tasks/{taskID}: delete: - description: > + description: | Deletes a task and associated records. + Use this endpoint to delete a task and all associated records (task runs, logs, and labels). + Once the task is deleted, InfluxDB cancels all scheduled runs of the task. - Use this endpoint to delete a task and all associated records (task - runs, logs, and labels). - - Once the task is deleted, InfluxDB cancels all scheduled runs of the - task. - - - If you want to disable a task instead of delete it, [update the task - status to `inactive`](#operation/PatchTasksID). 
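Where the delete description recommends deactivating a task rather than deleting it, a minimal sketch of that call -- `TASK_ID`, the token, and the host are placeholders:

```sh
# Disable a task (cancels scheduled runs and prevents manual runs).
curl --request PATCH "http://localhost:8086/api/v2/tasks/TASK_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"status": "inactive"}'
```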
+ If you want to disable a task instead of deleting it, [update the task status to `inactive`](#operation/PatchTasksID).
 operationId: DeleteTasksID
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
@@ -16579,24 +15814,15 @@ paths:
 - Data I/O endpoints
 - Tasks
 patch:
- description: >
+ description: |
 Updates a task and then cancels all scheduled runs of the task.
+ Use this endpoint to set, modify, and clear task properties (for example: `cron`, `name`, `flux`, `status`).
+ Once InfluxDB applies the update, it cancels all previously scheduled runs of the task.

- Use this endpoint to set, modify, and clear task properties (for
- example: `cron`, `name`, `flux`, `status`).

- Once InfluxDB applies the update, it cancels all previously scheduled
- runs of the task.

-
- To update a task, pass an object that contains the updated key-value
- pairs.
-
+ To update a task, pass an object that contains the updated key-value pairs.
 To activate or inactivate a task, set the `status` property.
-
- _`"status": "inactive"`_ cancels scheduled runs and prevents manual runs
- of the task.
+ _`"status": "inactive"`_ cancels scheduled runs and prevents manual runs of the task.
 operationId: PatchTasksID
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
@@ -16654,9 +15880,7 @@ paths:
 application/json:
 schema:
 $ref: '#/components/schemas/LabelsResponse'
- description: >-
- Success. The response body contains a list of all labels for the
- task.
+ description: Success. The response body contains a list of all labels for the task.
 '400':
 $ref: '#/components/responses/BadRequestError'
 '401':
@@ -16671,12 +15895,10 @@ paths:
 tags:
 - Tasks
 post:
- description: >
+ description: |
 Adds a label to a task.
-
- Use this endpoint to add a label that you can use to filter tasks in the
- InfluxDB UI.
+ Use this endpoint to add a label that you can use to filter tasks in the InfluxDB UI.
 operationId: PostTasksIDLabels
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
@@ -16699,9 +15921,7 @@ paths:
 application/json:
 schema:
 $ref: '#/components/schemas/LabelResponse'
- description: >-
- Success. The response body contains a list of all labels for the
- task.
+ description: Success. The response body contains a list of all labels for the task.
 '400':
 $ref: '#/components/responses/BadRequestError'
 '401':
@@ -16752,20 +15972,13 @@ paths:
 - Tasks
 /api/v2/tasks/{taskID}/logs:
 get:
- description: >
- Retrieves a list of all logs for a
- [task](/influxdb/v2.3/reference/glossary/#task).
-
-
- When an InfluxDB task runs, a “run” record is created in the task’s
- history.
-
- Logs associated with each run provide relevant log messages, timestamps,
- and the exit status of the run attempt.
+ description: |
+ Retrieves a list of all logs for a [task](/influxdb/v2.3/reference/glossary/#task).
+ When an InfluxDB task runs, a “run” record is created in the task’s history.
+ Logs associated with each run provide relevant log messages, timestamps, and the exit status of the run attempt.

 Use this endpoint to retrieve only the log events for a task,
-
 without additional task metadata.
 operationId: GetTasksIDLogs
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
@@ -16785,32 +15998,20 @@ paths:
 summary: Events for a failed task run.
value: events: - - message: >- - Started task from script: "option task = {name: \"test - task\", every: 3d, offset: 0s}" + - message: 'Started task from script: "option task = {name: \"test task\", every: 3d, offset: 0s}"' runID: 09a946fc3167d000 time: '2022-07-13T07:06:54.198167Z' - message: Completed(failed) runID: 09a946fc3167d000 time: '2022-07-13T07:07:13.104037Z' - - message: >- - error exhausting result iterator: error in query - specification while starting program: this Flux script - returns no streaming data. Consider adding a "yield" - or invoking streaming functions directly, without - performing an assignment + - message: 'error exhausting result iterator: error in query specification while starting program: this Flux script returns no streaming data. Consider adding a "yield" or invoking streaming functions directly, without performing an assignment' runID: 09a946fc3167d000 time: '2022-07-13T08:24:37.115323Z' taskSuccess: summary: Events for a successful task run. value: events: - - message: >- - Started task from script: "option task = {name: - \"task1\", every: 30m} from(bucket: \"iot_center\") |> - range(start: -90d) |> filter(fn: (r) => r._measurement - == \"environment\") |> aggregateWindow(every: 1h, fn: - mean)" + - message: 'Started task from script: "option task = {name: \"task1\", every: 30m} from(bucket: \"iot_center\") |> range(start: -90d) |> filter(fn: (r) => r._measurement == \"environment\") |> aggregateWindow(every: 1h, fn: mean)"' runID: 09b070dadaa7d000 time: '2022-07-18T14:46:07.101231Z' - message: Completed(success) @@ -16818,14 +16019,10 @@ paths: time: '2022-07-18T14:46:07.242859Z' schema: $ref: '#/components/schemas/Logs' - description: > - Success. The response body contains an `events` list with logs for - the task. - + description: | + Success. The response body contains an `events` list with logs for the task. Each log event `message` contains detail about the event. - - If a task run fails, InfluxDB logs an event with the reason for the - failure. + If a task run fails, InfluxDB logs an event with the reason for the failure. '400': $ref: '#/components/responses/BadRequestError' '401': @@ -16842,11 +16039,9 @@ paths: /api/v2/tasks/{taskID}/members: get: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. operationId: GetTasksIDMembers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -16876,17 +16071,11 @@ paths: - Tasks post: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. - - - Adds a user to members of a task and returns the newly created member - with - - role and user detail. + Adds a user to members of a task and returns the member. operationId: PostTasksIDMembers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -16922,11 +16111,9 @@ paths: /api/v2/tasks/{taskID}/members/{userID}: delete: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. 
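Because the member and owner endpoints are deprecated, here is a hedged sketch of the suggested replacement -- an authorization that grants task permissions through `/api/v2/authorizations`; the IDs and the exact permission set are assumptions:

```sh
# Grant a user read and write access to tasks in an organization
# instead of assigning the deprecated member role.
curl --request POST "http://localhost:8086/api/v2/authorizations" \
  --header "Authorization: Token INFLUX_OP_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "orgID": "INFLUX_ORG_ID",
    "userID": "USER_ID",
    "permissions": [
      {"action": "read", "resource": {"type": "tasks", "orgID": "INFLUX_ORG_ID"}},
      {"action": "write", "resource": {"type": "tasks", "orgID": "INFLUX_ORG_ID"}}
    ]
  }'
```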
operationId: DeleteTasksIDMembersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -16957,12 +16144,9 @@ paths: /api/v2/tasks/{taskID}/owners: get: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. - + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. Retrieves all users that have owner permission for a task. operationId: GetTasksIDOwners @@ -16980,15 +16164,11 @@ paths: application/json: schema: $ref: '#/components/schemas/ResourceOwners' - description: > + description: | Success. + The response contains a list of `users` that have the `owner` role for the task. - The response contains a list of `users` that have the `owner` role - for the task. - - - If the task has no owners, the response contains an empty `users` - array. + If the task has no owners, the response contains an empty `users` array. '401': $ref: '#/components/responses/AuthorizationError' '422': @@ -16996,16 +16176,12 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -17020,18 +16196,13 @@ paths: - Tasks post: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. - + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. Assigns a task `owner` role to a user. - Use this endpoint to create a _resource owner_ for the task. - A _resource owner_ is a user with `role: owner` for a specific resource. operationId: PostTasksIDOwners parameters: @@ -17077,16 +16248,12 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -17102,11 +16269,9 @@ paths: /api/v2/tasks/{taskID}/owners/{userID}: delete: deprecated: true - description: > + description: | **Deprecated**: Tasks don't use `owner` and `member` roles. - - Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user - permissions. + Use [`/api/v2/authorizations`](#tag/Authorizations) to assign user permissions. operationId: DeleteTasksIDOwnersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -17136,15 +16301,11 @@ paths: - Tasks /api/v2/tasks/{taskID}/runs: get: - description: > + description: | Retrieves a list of runs for a [task](/influxdb/v2.3/process-data/). - - To limit which task runs are returned, pass query parameters in your - request. 
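A minimal sketch of the run-listing call documented here, combining the `limit` and `afterTime` filters described next -- the task ID, timestamp, token, and host are placeholders:

```sh
# List up to 10 runs of a task scheduled after a given RFC3339 timestamp.
curl --request GET "http://localhost:8086/api/v2/tasks/TASK_ID/runs?limit=10&afterTime=2022-07-01T00:00:00Z" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```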
- - If no query parameters are passed, InfluxDB returns all task runs up to - the default `limit`. + To limit which task runs are returned, pass query parameters in your request. + If no query parameters are passed, InfluxDB returns all task runs up to the default `limit`. operationId: GetTasksIDRuns parameters: - $ref: '#/components/parameters/TraceSpan' @@ -17170,20 +16331,16 @@ paths: maximum: 500 minimum: 1 type: integer - - description: > - A timestamp ([RFC3339 date/time - format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). - + - description: | + A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). Only returns runs scheduled after this time. in: query name: afterTime schema: format: date-time type: string - - description: > - A timestamp ([RFC3339 date/time - format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). - + - description: | + A timestamp ([RFC3339 date/time format](/influxdb/v2.3/reference/glossary/#rfc3339-timestamp)). Only returns runs scheduled before this time. in: query name: beforeTime @@ -17207,22 +16364,15 @@ paths: tags: - Tasks post: - description: > + description: | Schedules a task run to start immediately, ignoring scheduled runs. - Use this endpoint to manually start a task run. - Scheduled runs will continue to run as scheduled. - This may result in concurrently running tasks. - To _retry_ a previous run (and avoid creating a new run), - - use the [`POST - /api/v2/tasks/{taskID}/runs/{runID}/retry`](#operation/PostTasksIDRunsIDRetry) - endpoint. + use the [`POST /api/v2/tasks/{taskID}/runs/{runID}/retry` endpoint](#operation/PostTasksIDRunsIDRetry). operationId: PostTasksIDRuns parameters: - $ref: '#/components/parameters/TraceSpan' @@ -17316,10 +16466,8 @@ paths: tags: - Tasks get: - description: > - Retrieves a specific run for a - [task](/influxdb/v2.3/reference/glossary/#task). - + description: | + Retrieves a specific run for a [task](/influxdb/v2.3/reference/glossary/#task). Use this endpoint to retrieve detail and logs for a specific task run. operationId: GetTasksIDRunsID @@ -17348,19 +16496,12 @@ paths: finishedAt: '2022-07-18T14:46:07.308254Z' id: 09b070dadaa7d000 links: - logs: >- - /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/logs - retry: >- - /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/retry + logs: /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/logs + retry: /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000/retry self: /api/v2/tasks/0996e56b2f378000/runs/09b070dadaa7d000 task: /api/v2/tasks/0996e56b2f378000 log: - - message: >- - Started task from script: "option task = {name: - \"task1\", every: 30m} from(bucket: \"iot_center\") |> - range(start: -90d) |> filter(fn: (r) => r._measurement - == \"environment\") |> - aggregateWindow(every: 1h, fn: mean)" + - message: 'Started task from script: "option task = {name: \"task1\", every: 30m} from(bucket: \"iot_center\") |> range(start: -90d) |> filter(fn: (r) => r._measurement == \"environment\") |> aggregateWindow(every: 1h, fn: mean)"' runID: 09b070dadaa7d000 time: '2022-07-18T14:46:07.101231Z' - message: Completed(success) @@ -17389,15 +16530,11 @@ paths: - Tasks /api/v2/tasks/{taskID}/runs/{runID}/logs: get: - description: > + description: | Retrieves all logs for a task run. + A log is a list of run events with `runID`, `time`, and `message` properties. - A log is a list of run events with `runID`, `time`, and `message` - properties. 
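Looking back at the manual-run endpoint above (`POST /api/v2/tasks/{taskID}/runs`), a minimal sketch -- the task ID and token are placeholders, and the empty JSON body is an assumption that you want the run to start immediately rather than at a `scheduledFor` time:

```sh
# Start a task run immediately; scheduled runs continue unaffected.
curl --request POST "http://localhost:8086/api/v2/tasks/TASK_ID/runs" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{}'
```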
- - - Use this endpoint to help analyze task performance and troubleshoot - failed task runs. + Use this endpoint to help analyze task performance and troubleshoot failed task runs. operationId: GetTasksIDRunsIDLogs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -17422,32 +16559,20 @@ paths: summary: Events for a failed task. value: events: - - message: >- - Started task from script: "option task = {name: \"test - task\", every: 3d, offset: 0s}" + - message: 'Started task from script: "option task = {name: \"test task\", every: 3d, offset: 0s}"' runID: 09a946fc3167d000 time: '2022-07-13T07:06:54.198167Z' - message: Completed(failed) runID: 09a946fc3167d000 time: '2022-07-13T07:07:13.104037Z' - - message: >- - error exhausting result iterator: error in query - specification while starting program: this Flux script - returns no streaming data. Consider adding a "yield" - or invoking streaming functions directly, without - performing an assignment + - message: 'error exhausting result iterator: error in query specification while starting program: this Flux script returns no streaming data. Consider adding a "yield" or invoking streaming functions directly, without performing an assignment' runID: 09a946fc3167d000 time: '2022-07-13T08:24:37.115323Z' taskSuccess: summary: Events for a successful task run. value: events: - - message: >- - Started task from script: "option task = {name: - \"task1\", every: 30m} from(bucket: \"iot_center\") |> - range(start: -90d) |> filter(fn: (r) => r._measurement - == \"environment\") |> aggregateWindow(every: 1h, fn: - mean)" + - message: 'Started task from script: "option task = {name: \"task1\", every: 30m} from(bucket: \"iot_center\") |> range(start: -90d) |> filter(fn: (r) => r._measurement == \"environment\") |> aggregateWindow(every: 1h, fn: mean)"' runID: 09b070dadaa7d000 time: '2022-07-18T14:46:07.101231Z' - message: Completed(success) @@ -17455,14 +16580,10 @@ paths: time: '2022-07-18T14:46:07.242859Z' schema: $ref: '#/components/schemas/Logs' - description: > - Success. The response body contains an `events` list with logs for - the task run. - + description: | + Success. The response body contains an `events` list with logs for the task run. Each log event `message` contains detail about the event. - - If a run fails, InfluxDB logs an event with the reason for the - failure. + If a run fails, InfluxDB logs an event with the reason for the failure. '400': $ref: '#/components/responses/BadRequestError' '401': @@ -17478,28 +16599,34 @@ paths: - Tasks /api/v2/tasks/{taskID}/runs/{runID}/retry: post: - description: > - Queues a task run to retry and returns the newly scheduled run. - - - To manually start a _new_ task run, use the [`POST - /api/v2/tasks/{taskID}/runs`](#operation/PostTasksIDRuns) endpoint. + description: | + Queues a [task](/influxdb/v2.3/reference/glossary/#task) run to + retry and returns the scheduled run. + To manually start a _new_ task run, use the + [`POST /api/v2/tasks/{taskID}/runs` endpoint](#operation/PostTasksIDRuns). #### Limitations - - The task must be _active_ (`status: "active"`). operationId: PostTasksIDRunsIDRetry parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The ID of the task to retry. + - description: | + A [task](/influxdb/v2.3/reference/glossary/#task) ID. + Specifies the task to retry. in: path name: taskID required: true schema: type: string - - description: The ID of the task run to retry. + - description: | + A [task](/influxdb/v2.3/reference/glossary/#task) run ID. 
+ Specifies the task run to retry. + + To find a task run ID, use the + [`GET /api/v2/tasks/{taskID}/runs` endpoint](#operation/GetTasksIDRuns) + to list task runs. in: path name: runID required: true @@ -17520,10 +16647,8 @@ paths: value: id: 09d60ffe08738000 links: - logs: >- - /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/logs - retry: >- - /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/retry + logs: /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/logs + retry: /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000/retry self: /api/v2/tasks/09a776832f381000/runs/09d60ffe08738000 task: /api/v2/tasks/09a776832f381000 requestedAt: '2022-08-16T20:05:11.84145Z' @@ -17979,9 +17104,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ResourceOwner' - description: >- - Telegraf configuration owner was added. Returns a ResourceOwner that - references the User. + description: Telegraf configuration owner was added. Returns a ResourceOwner that references the User. default: content: application/json: @@ -18022,44 +17145,26 @@ paths: - Telegrafs /api/v2/templates/apply: post: - description: > + description: | Applies a template to - - create or update a [stack](/influxdb/v2.3/influxdb-templates/stacks/) of - InfluxDB - + create or update a [stack](/influxdb/v2.3/influxdb-templates/stacks/) of InfluxDB [resources](/influxdb/v2.3/reference/cli/influx/export/all/#resources). - The response contains the diff of changes and the stack ID. - Use this endpoint to install an InfluxDB template to an organization. - Provide template URLs or template objects in your request. - To customize which template resources are installed, use the `actions` - parameter. - By default, when you apply a template, InfluxDB installs the template to - - create and update stack resources and then generates a diff of the - changes. - + create and update stack resources and then generates a diff of the changes. If you pass `dryRun: true` in the request body, InfluxDB validates the - template and generates the resource diff, but doesn’t make any - changes to your instance. - #### Custom values for templates - - - Some templates may contain [environment - references](/influxdb/v2.3/influxdb-templates/create/#include-user-definable-resource-names) - for custom metadata. + - Some templates may contain [environment references](/influxdb/v2.3/influxdb-templates/create/#include-user-definable-resource-names) for custom metadata. To provide custom values for environment references, pass the _`envRefs`_ property in the request body. For more information and examples, see how to @@ -18074,21 +17179,16 @@ paths: #### Required permissions - - `write` permissions for resource types in the template. - #### Rate limits (with InfluxDB Cloud) - - Adjustable service quotas apply. For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - [Use templates](/influxdb/v2.3/influxdb-templates/use/) - - [Stacks](/influxdb/v2.3/influxdb-templates/stacks/) operationId: ApplyTemplate requestBody: @@ -18188,16 +17288,11 @@ paths: application/json: schema: $ref: '#/components/schemas/TemplateSummary' - description: > + description: | Success. - The template applied successfully. - The response body contains the stack ID, a diff, and a summary. - - The diff compares the initial state to the state after the template - installation. - + The diff compares the initial state to the state after the template installation. The summary contains newly created resources. 
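A hedged sketch of the dry-run flow described above -- the org ID and the template URL are placeholders, and the request body is trimmed to the properties this description mentions:

```sh
# Validate a template and preview the diff without changing the instance.
curl --request POST "http://localhost:8086/api/v2/templates/apply" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "dryRun": true,
    "orgID": "INFLUX_ORG_ID",
    "remotes": [
      {"url": "https://example.com/templates/monitoring-template.yml"}
    ]
  }'
```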
'422': content: @@ -18367,9 +17462,7 @@ paths: application/x-yaml: schema: $ref: '#/components/schemas/Template' - description: >- - The template was created successfully. Returns the newly created - template. + description: The template was created successfully. Returns the newly created template. default: content: application/json: @@ -18381,33 +17474,39 @@ paths: - Templates /api/v2/users: get: - description: > - Retrieves a list of users. Default limit is `20`. + description: | + Retrieves a list of [users](/influxdb/v2.3/reference/glossary/#user). + Default limit is `20`. + To limit which users are returned, pass query parameters in your request. - To limit which users are returned, pass query parameters in your - request. + #### Required permissions for InfluxDB OSS + | Action | Permission required | Restriction | + |:-------|:--------------------|:------------| + | List all users | _[Operator token](/influxdb/latest/security/tokens/#operator-token)_ | | + | List a specific user | `read-users` or `read-user USER_ID` | | - #### Required permissions + Replace the following: - - - `read-user USER_ID` permission. - `USER_ID` is the ID of the user that you want to list. - - InfluxDB OSS requires an _[operator - token](/influxdb/latest/security/tokens/#operator-token))_ to list all - users. + - `USER_ID`: the ID of the user that you want to retrieve. operationId: GetUsers parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/Offset' - $ref: '#/components/parameters/Limit' - $ref: '#/components/parameters/After' - - in: query + - description: | + A user name. + Only lists the specified [user](/influxdb/v2.3/reference/glossary/#user). + in: query name: name schema: type: string - - in: query + - description: | + A user ID. + Only lists the specified [user](/influxdb/v2.3/reference/glossary/#user). + in: query name: id schema: type: string @@ -18436,16 +17535,13 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: > + description: | Unprocessable entity. - The error may indicate one of the following problems: - - - The request body isn't valid--the request is well-formed, but - InfluxDB can't process it due to semantic errors. - + - The request body isn't valid--the request is well-formed, + but InfluxDB can't process it due to semantic errors. - You passed a parameter combination that InfluxDB doesn't support. '500': $ref: '#/components/responses/InternalServerError' @@ -18458,11 +17554,34 @@ paths: - Users post: description: | - Creates a user and returns the newly created user. + Creates a [user](/influxdb/v2.3/reference/glossary/#user) that can access InfluxDB. + Returns the user. + + Use this endpoint to create a user that can sign in to start a user session + through one of the following interfaces: + + - InfluxDB UI + - `/api/v2/signin` InfluxDB API endpoint + - InfluxDB CLI + + This endpoint represents the first two steps in a four-step process to allow a user + to authenticate with a username and password, and then access data in an organization: + + 1. Create a user: send a `POST` request to `POST /api/v2/users`. `name` is required. + 2. Extract the user ID (`id`) value from the API response for _step 1_. + 3. Create an authorization (and API token) for the user: send a `POST` request to [`POST /api/v2/authorizations`](#operation/PostAuthorizations), passing the user ID (`id`) from _step 2_. + 4. 
Create a password for the user: send a `POST` request to [`POST /api/v2/users/USER_ID/password`](#operation/PostUsersIDPassword), passing the user ID from _step 2_.

 #### Required permissions

 | Action | Permission required | Restriction |
 |:-------|:--------------------|:------------|
 | Create a user | _[Operator token](/influxdb/latest/security/tokens/#operator-token)_ | |

 #### Related guides

 - [Create a user](/influxdb/latest/users/create-user/)
 - [Create an API token scoped to a user](/influxdb/latest/security/tokens/create-token/#create-a-token-scoped-to-a-user)
 operationId: PostUsers
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
@@ -18481,7 +17600,7 @@ paths:
 $ref: '#/components/schemas/UserResponse'
 description: |
 Success.
- The response contains the newly created user.
+ The response body contains the user.
 '401':
 content:
 application/json:
 schema:
 $ref: '#/components/schemas/Error'
@@ -18500,16 +17619,12 @@ paths:
 application/json:
 schema:
 $ref: '#/components/schemas/Error'
- description: >
+ description: |
 Unprocessable entity.

 The error may indicate one of the following problems:

- - The request body isn't valid--the request is well-formed, but
- InfluxDB can't process it due to semantic errors.

+ - The request body isn't valid--the request is well-formed, but InfluxDB can't process it due to semantic errors.
 - You passed a parameter combination that InfluxDB doesn't support.
 '500':
 $ref: '#/components/responses/InternalServerError'
@@ -18522,9 +17637,8 @@ paths:
 x-codeSamples:
 - label: 'cURL: create a user and set a password'
 lang: Shell
- source: >
+ source: |
 # Create the user and assign the user ID to a variable.

 USER_ID=$(curl --request POST \
 "http://localhost:8086/api/v2/users/" \
 --header "Authorization: Token INFLUX_OP_TOKEN" \
 --header 'Content-type: application/json' \
 --data-binary @- << EOF
 {
 "name": "USER_NAME",
 "status": "active"
 }
 EOF
-
 )

 # Pass the user ID and a password to set the password for the user.

- curl request POST
- "http://localhost:8086/api/v2/users/$USER_ID/password/" \
+ curl --request POST "http://localhost:8086/api/v2/users/$USER_ID/password" \
 --header "Authorization: Token INFLUX_OP_TOKEN" \
 --header 'Content-type: application/json' \
 --data '{ "password": "USER_PASSWORD" }'
@@ -18567,10 +17677,18 @@ paths:
 tags:
 - Users
 get:
+ description: |
+ Retrieves a [user](/influxdb/v2.3/reference/glossary/#user).
+
+ #### Related guides
+
+ - [Manage users](/influxdb/v2.3/organizations/users/)
 operationId: GetUsersID
 parameters:
 - $ref: '#/components/parameters/TraceSpan'
- - description: The user ID.
+ - description: |
+ A user ID.
+ Retrieves the specified [user](/influxdb/v2.3/reference/glossary/#user).
 in: path
 name: userID
 required: true
 schema:
 type: string
 responses:
 '200':
 content:
 application/json:
 schema:
 $ref: '#/components/schemas/UserResponse'
- description: User details
+ description: Success. The response body contains the user.
 default:
 $ref: '#/components/responses/GeneralServerError'
 description: Unexpected error
@@ -18623,14 +17741,25 @@ paths:
 /api/v2/users/{userID}/password:
 post:
 description: |
+ Updates a user password.
+
+ Use this endpoint to let a user authenticate with
+ [Basic authentication credentials](#section/Authentication/BasicAuthentication)
+ and set a new password.
+
 #### InfluxDB Cloud

- InfluxDB Cloud doesn't support changing user passwords through the API.
- Use the InfluxDB Cloud user interface to update your password.
+ - Doesn't allow you to manage user passwords through the API.
+ Use the InfluxDB Cloud user interface (UI) to update a password. + + #### Related guides + + - [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/) + - [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/) operationId: PostUsersIDPassword parameters: - $ref: '#/components/parameters/TraceSpan' - - description: The user ID. + - description: The ID of the user to set the password for. in: path name: userID required: true @@ -18641,29 +17770,52 @@ paths: application/json: schema: $ref: '#/components/schemas/PasswordResetBody' - description: New password + description: The new password to set for the user. required: true responses: '204': - description: Password successfully updated + description: Success. The password is updated. '400': - description: > - Bad request. - - InfluxDB Cloud doesn't support changing passwords through the API - and always responds with this status. - default: content: application/json: + examples: + updatePasswordNotAllowed: + summary: Cloud API can't update passwords + value: + code: invalid + message: passwords cannot be changed through the InfluxDB Cloud API schema: $ref: '#/components/schemas/Error' - description: Unsuccessful authentication + description: | + Bad request. + + #### InfluxDB Cloud + + - Doesn't allow you to manage passwords through the API; always responds with this status. + + #### InfluxDB OSS + + - Doesn't understand a value passed in the request. + default: + $ref: '#/components/responses/GeneralServerError' + description: Unexpected error security: - BasicAuthentication: [] summary: Update a password tags: - Security and access endpoints - Users + x-codeSamples: + - label: 'cURL: use Basic auth to update the user password' + lang: Shell + source: | + curl --request POST \ + "http://localhost:8086/api/v2/users/USER_ID/password" \ + --header 'Content-type: application/json' \ + --user "USERNAME:PASSWORD" \ + --data-binary @- << EOF + {"password": ""} + EOF /api/v2/variables: get: operationId: GetVariables @@ -18922,18 +18074,13 @@ paths: - Variables /api/v2/write: post: - description: > + description: | Writes data to a bucket. - - Use this endpoint to send data in [line - protocol](/influxdb/v2.3/reference/syntax/line-protocol/) format to - InfluxDB. - + Use this endpoint to send data in [line protocol](/influxdb/v2.3/reference/syntax/line-protocol/) format to InfluxDB. #### InfluxDB Cloud - - Takes the following steps when you send a write request: 1. Validates the request and queues the write. @@ -18949,40 +18096,27 @@ paths: #### InfluxDB OSS - - Validates the request, handles the write synchronously, and then responds with success or failure. - - If all points were written successfully, responds with HTTP `204` - status code; + - If all points were written successfully, responds with HTTP `204` status code; otherwise, returns the first line that failed. #### Required permissions - - `write-buckets` or `write-bucket BUCKET_ID`. `BUCKET_ID` is the ID of the destination bucket. #### Rate limits (with InfluxDB Cloud) - `write` rate limits apply. - - For more information, see [limits and adjustable - quotas](/influxdb/cloud/account-management/limits/). - + For more information, see [limits and adjustable quotas](/influxdb/cloud/account-management/limits/). #### Related guides - - - [Write data with the InfluxDB - API](/influxdb/v2.3/write-data/developer-tools/api). - - - [Optimize writes to - InfluxDB](/influxdb/v2.3/write-data/best-practices/optimize-writes/). 
- - - [Troubleshoot issues writing - data](/influxdb/v2.3/write-data/troubleshoot/) + - [Write data with the InfluxDB API](/influxdb/v2.3/write-data/developer-tools/api) + - [Optimize writes to InfluxDB](/influxdb/v2.3/write-data/best-practices/optimize-writes/) + - [Troubleshoot issues writing data](/influxdb/v2.3/write-data/troubleshoot/) operationId: PostWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -18993,27 +18127,22 @@ paths: name: Content-Encoding schema: default: identity - description: > + description: | Content coding. - - Use `gzip` for compressed data or `identity` for unmodified, - uncompressed data. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity type: string - - description: > + - description: | The format of the data in the request body. - - To send a line protocol payload, pass `Content-Type: text/plain; - charset=utf-8`. + To send a line protocol payload, pass `Content-Type: text/plain; charset=utf-8`. in: header name: Content-Type schema: default: text/plain; charset=utf-8 - description: > - `text/plain` is the content type for line protocol. `UTF-8` is the - default character set. + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. enum: - text/plain - text/plain; charset=utf-8 @@ -19042,7 +18171,8 @@ paths: - Returns only `application/json` for format and limit errors. #### Related guides - - [Troubleshoot issues writing data](/influxdb/v2.3/write-data/troubleshoot/). + + - [Troubleshoot issues writing data](/influxdb/v2.3/write-data/troubleshoot/) in: header name: Accept schema: @@ -19051,28 +18181,19 @@ paths: enum: - application/json type: string - - description: > + - description: | The destination organization for writes. - InfluxDB writes all points in the batch to this organization. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Writes to the bucket in the organization associated with the - authorization (API token). - + - Writes to the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. - - InfluxDB writes all points in the batch to this organization. in: query name: org @@ -19080,27 +18201,19 @@ paths: schema: description: The organization name or ID. type: string - - description: > + - description: | The ID of the destination organization for writes. - If you pass both `orgID` and `org`, they must both be valid. - #### InfluxDB Cloud - - Doesn't require `org` or `orgID`. - - - Writes to the bucket in the organization associated with the - authorization (API token). - + - Writes to the bucket in the organization associated with the authorization (API token). #### InfluxDB OSS - - Requires either `org` or `orgID`. - - InfluxDB writes all points in the batch to this organization. 
in: query name: orgID @@ -19125,21 +18238,15 @@ paths: text/plain: examples: plain-utf8: - value: > - airSensors,sensor_id=TLM0201 - temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 - 1630424257000000000 - - airSensors,sensor_id=TLM0202 - temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 - 1630424257000000000 + value: | + airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000 + airSensors,sensor_id=TLM0202 temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 1630424257000000000 schema: format: byte type: string - description: > + description: | Data in line protocol format. - To send compressed data, do the following: 1. Use [GZIP](https://www.gzip.org/) to compress the line protocol data. @@ -19148,52 +18255,34 @@ paths: #### Related guides - - - [Best practices for optimizing - writes](/influxdb/v2.3/write-data/best-practices/optimize-writes/). + - [Best practices for optimizing writes](/influxdb/v2.3/write-data/best-practices/optimize-writes/) required: true responses: '204': - description: > + description: | Success. - #### InfluxDB Cloud - - Validated and queued the request. - - - Handles the write asynchronously - the write might not have - completed yet. - + - Handles the write asynchronously - the write might not have completed yet. #### InfluxDB OSS - - Successfully wrote all points in the batch. - #### Related guides - - - [How to check for write - errors](/influxdb/v2.3/write-data/troubleshoot/). + - [How to check for write errors](/influxdb/v2.3/write-data/troubleshoot/) '400': content: application/json: examples: measurementSchemaFieldTypeConflict: - summary: >- - (Cloud) field type conflict thrown by an explicit bucket - schema + summary: (Cloud) field type conflict thrown by an explicit bucket schema value: code: invalid - message: >- - partial write error (2 written): unable to parse - 'air_sensor,service=S1,sensor=L1 - temperature="90.5",humidity=70.0 1632850122': schema: - field type for field "temperature" not permitted by - schema; got String but expected Float + message: 'partial write error (2 written): unable to parse ''air_sensor,service=S1,sensor=L1 temperature="90.5",humidity=70.0 1632850122'': schema: field type for field "temperature" not permitted by schema; got String but expected Float' orgNotFound: summary: (OSS) organization not found value: @@ -19201,34 +18290,21 @@ paths: message: 'failed to decode request body: organization not found' schema: $ref: '#/components/schemas/LineProtocolError' - description: > + description: | Bad request. The response body contains detail about the error. - - InfluxDB returns this error if the line protocol data in the request - is malformed. - - The response body contains the first malformed line in the data, and - indicates what was expected. - - For partial writes, the number of points written and the number of - points rejected are also included. - - For more information, check the `rejected_points` measurement in - your `_monitoring` bucket. - + InfluxDB returns this error if the line protocol data in the request is malformed. + The response body contains the first malformed line in the data, and indicates what was expected. + For partial writes, the number of points written and the number of points rejected are also included. + For more information, check the `rejected_points` measurement in your `_monitoring` bucket. 
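Pulling together the line-protocol body and the `Content-Encoding` steps above, a minimal sketch of a compressed write -- the bucket, org, token, host, and the sample point are placeholders:

```sh
# Compress line protocol on the fly and write it to a bucket.
echo "airSensors,sensor_id=TLM0201 temperature=73.97 1630424257000000000" | gzip | \
  curl --request POST "http://localhost:8086/api/v2/write?org=INFLUX_ORG&bucket=INFLUX_BUCKET&precision=ns" \
    --header "Authorization: Token INFLUX_API_TOKEN" \
    --header "Content-Encoding: gzip" \
    --header "Content-Type: text/plain; charset=utf-8" \
    --data-binary @-
```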
#### InfluxDB Cloud - - Returns this error for bucket schema conflicts. - #### InfluxDB OSS - - - Returns this error if `org` or `orgID` doesn't match an - organization. + - Returns this error if `org` or `orgID` doesn't match an organization. '401': $ref: '#/components/responses/AuthorizationError' '404': @@ -19239,9 +18315,8 @@ paths: examples: dataExceedsSizeLimitOSS: summary: InfluxDB OSS response - value: > - {"code":"request too large","message":"unable to read data: - points batch is too large"} + value: | + {"code":"request too large","message":"unable to read data: points batch is too large"} schema: $ref: '#/components/schemas/LineProtocolLengthError' text/html: @@ -19291,28 +18366,22 @@ paths: - Doesn't return this error. headers: Retry-After: - description: >- - Non-negative decimal integer indicating seconds to wait before - retrying the request. + description: Non-negative decimal integer indicating seconds to wait before retrying the request. schema: format: int32 type: integer '500': $ref: '#/components/responses/InternalServerError' '503': - description: > + description: | Service unavailable. - - Returns this error if the server is temporarily unavailable to accept writes. - - Returns a `Retry-After` header that describes when to try the - write again. + - Returns a `Retry-After` header that describes when to try the write again. headers: Retry-After: - description: >- - Non-negative decimal integer indicating seconds to wait before - retrying the request. + description: Non-negative decimal integer indicating seconds to wait before retrying the request. schema: format: int32 type: integer @@ -19329,42 +18398,42 @@ paths: - $ref: '#/components/parameters/TraceSpan' - description: | A user ID. - Only returns legacy authorizations scoped to this user. + Only returns legacy authorizations scoped to the specified [user](/influxdb/v2.4/reference/glossary/#user). in: query name: userID schema: type: string - description: | A user name. - Only returns legacy authorizations scoped to this user. + Only returns legacy authorizations scoped to the specified [user](/influxdb/v2.4/reference/glossary/#user). in: query name: user schema: type: string - description: | An organization ID. - Only returns legacy authorizations that belong to this organization. + Only returns legacy authorizations that belong to the specified [organization](/influxdb/v2.4/reference/glossary/#organization). in: query name: orgID schema: type: string - description: | An organization name. - Only returns legacy authorizations that belong to this organization. + Only returns legacy authorizations that belong to the specified [organization](/influxdb/v2.4/reference/glossary/#organization). in: query name: org schema: type: string - description: | An authorization name token. - Only returns legacy authorizations with this token (name). + Only returns legacy authorizations with the specified name. in: query name: token schema: type: string - description: | An authorization ID. - Only returns the legacy authorization with this ID. + Returns the specified legacy authorization. in: query name: authID schema: @@ -19383,9 +18452,7 @@ paths: $ref: '#/components/schemas/Links' readOnly: true type: object - description: >- - Success. The response body contains a list of legacy - `authorizations`. + description: Success. The response body contains a list of legacy `authorizations`. 
default: $ref: '#/components/responses/ServerError' description: Unexpected error @@ -19393,20 +18460,14 @@ paths: tags: - Legacy Authorizations post: - description: > - Creates a legacy authorization and returns the newly created - authorization. - + description: | + Creates a legacy authorization and returns the legacy authorization. #### Required permissions + - `write-users USER_ID` if you pass the `userID` property in the request body. - - `write-users USER_ID` if you pass the `userID` property in the request - body. - - - `USER_ID` is the ID of the user that you want to scope the authorization - to. + `USER_ID` is the ID of the user that you want to scope the authorization to. operationId: PostLegacyAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' @@ -19441,17 +18502,14 @@ paths: schema: properties: code: - description: > - The HTTP status code description. Default is - `unauthorized`. + description: | + The HTTP status code description. Default is `unauthorized`. enum: - unauthorized readOnly: true type: string message: - description: >- - A human-readable message that may contain detail about the - error. + description: A human-readable message that may contain detail about the error. readOnly: true type: string description: | @@ -19584,30 +18642,22 @@ paths: name: Accept schema: default: application/json - description: > + description: | Media type that the client can understand. - - **Note**: With `application/csv`, query results include [**unix - timestamps**](/influxdb/v2.4/reference/glossary/#unix-timestamp) - instead of [RFC3339 - timestamps](/influxdb/v2.4/reference/glossary/#rfc3339-timestamp). + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2.4/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2.4/reference/glossary/#rfc3339-timestamp). enum: - application/json - application/csv - text/csv - application/x-msgpack type: string - - description: >- - The content encoding (usually a compression algorithm) that the - client can understand. + - description: The content encoding (usually a compression algorithm) that the client can understand. in: header name: Accept-Encoding schema: default: identity - description: >- - The content coding. Use `gzip` for compressed data or `identity` - for unmodified, uncompressed data. + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. enum: - gzip - identity @@ -19628,49 +18678,33 @@ paths: name: p schema: type: string - - description: > + - description: | The database to query data from. - - This is mapped to an InfluxDB - [bucket](/influxdb/v2.4/reference/glossary/#bucket). - - For more information, see [Database and retention policy - mapping](/influxdb/v2.4/api/influxdb-1x/dbrp/). + This is mapped to an InfluxDB [bucket](/influxdb/v2.4/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/v2.4/api/influxdb-1x/dbrp/). in: query name: db required: true schema: type: string - - description: > + - description: | The retention policy to query data from. - - This is mapped to an InfluxDB - [bucket](/influxdb/v2.4/reference/glossary/#bucket). - - For more information, see [Database and retention policy - mapping](/influxdb/v2.4/api/influxdb-1x/dbrp/). + This is mapped to an InfluxDB [bucket](/influxdb/v2.4/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/v2.4/api/influxdb-1x/dbrp/). 
in: query name: rp schema: type: string - - description: >- - The InfluxQL query to execute. To execute multiple queries, delimit - queries with a semicolon (`;`). + - description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`). in: query name: q required: true schema: type: string - - description: > + - description: | A unix timestamp precision. - - Formats timestamps as [unix (epoch) - timestamps](/influxdb/v2.4/reference/glossary/#unix-timestamp) the - specified precision - - instead of [RFC3339 - timestamps](/influxdb/v2.4/reference/glossary/#rfc3339-timestamp) - with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/v2.4/reference/glossary/#unix-timestamp) the specified precision + instead of [RFC3339 timestamps](/influxdb/v2.4/reference/glossary/#rfc3339-timestamp) with nanosecond precision. in: query name: epoch schema: @@ -19702,9 +18736,7 @@ paths: description: Query results headers: Content-Encoding: - description: >- - Lists encodings (usually compression algorithms) that have been - applied to the response payload. + description: Lists encodings (usually compression algorithms) that have been applied to the response payload. schema: default: identity description: | @@ -19733,9 +18765,7 @@ paths: - doesn't return this error. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: format: int32 type: integer @@ -19763,9 +18793,7 @@ paths: name: p schema: type: string - - description: >- - Bucket to write to. If none exists, InfluxDB creates a bucket with a - default 3-day retention policy. + - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. in: query name: db required: true @@ -19781,16 +18809,12 @@ paths: name: precision schema: type: string - - description: >- - When present, its value indicates to the database that compression - is applied to the line protocol body. + - description: When present, its value indicates to the database that compression is applied to the line protocol body. in: header name: Content-Encoding schema: default: identity - description: >- - Specifies that the line protocol in the body is encoded with gzip - or not encoded with identity. + description: Specifies that the line protocol in the body is encoded with gzip or not encoded with identity. enum: - gzip - identity @@ -19804,26 +18828,19 @@ paths: required: true responses: '204': - description: >- - Write data is correctly formatted and accepted for writing to the - bucket. + description: Write data is correctly formatted and accepted for writing to the bucket. '400': content: application/json: schema: $ref: '#/components/schemas/LineProtocolError' - description: >- - Line protocol poorly formed and no points were written. Response - can be used to determine the first malformed line in the body - line-protocol. All data in body was rejected and not written. + description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. 
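        The write parameters described above can be exercised from Flux itself. The following is a minimal sketch, assuming a local InfluxDB instance; the URL, token, and database name are placeholders, and the payload is sent uncompressed for brevity:

        ```js
        import "http"

        // Write one line-protocol point through the 1.x-compatible /write endpoint (sketch)
        http.post(
            url: "http://localhost:8086/write?db=mydb&precision=ns",
            headers: {Authorization: "Token MY_API_TOKEN"},
            data: bytes(v: "cpu,host=server01 usage=0.64"),
        )
        ```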
'401': content: application/json: schema: $ref: '#/components/schemas/Error' - description: >- - Token doesn't have sufficient permissions to write to this - organization and bucket or the organization and bucket do not exist. + description: Token doesn't have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist. '403': content: application/json: @@ -19835,31 +18852,20 @@ paths: application/json: schema: $ref: '#/components/schemas/LineProtocolLengthError' - description: >- - Write has been rejected because the payload is too large. Error - message returns max size supported. All data in body was rejected - and not written. + description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. '429': - description: >- - Token is temporarily over quota. The Retry-After header describes - when to try the write again. + description: Token is temporarily over quota. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: format: int32 type: integer '503': - description: >- - Server is temporarily unavailable to accept writes. The Retry-After - header describes when to try the write again. + description: Server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: format: int32 type: integer @@ -19877,192 +18883,114 @@ security: servers: - url: / tags: - - description: > + - description: | Use one of the following schemes to authenticate to the InfluxDB API: - - [Token authentication](#section/Authentication/TokenAuthentication) - - [Basic authentication](#section/Authentication/BasicAuthentication) - - - [Querystring - authentication](#section/Authentication/QuerystringAuthentication) - + - [Querystring authentication](#section/Authentication/QuerystringAuthentication) name: Authentication x-traitTag: true - - description: > + - description: | Create and manage authorizations (API tokens). - An _authorization_ contains a list of `read` and `write` - - permissions for organization resources and provides an API token for - authentication. - - An authorization belongs to an organization and only contains permissions - for that organization. - + permissions for organization resources and provides an API token for authentication. + An authorization belongs to an organization and only contains permissions for that organization. #### Limitations - - API tokens are visible to the user who created the authorization and to - any - - user with an _[operator - token](/influxdb/v2.3/security/tokens/#operator-token)_. - - In InfluxDB OSS, even if an API token has `read-authorizations` - permission, the - + API tokens are visible to the user who created the authorization and to any + user with an _[operator token](/influxdb/v2.3/security/tokens/#operator-token)_. + In InfluxDB OSS, even if an API token has `read-authorizations` permission, the token can't be used to view its authorization details. 

- Tokens stop working when the user who created the token is deleted.
-
-    We recommend creating a generic user to create and manage tokens for
-    writing data.
-
+    We recommend creating a generic user to create and manage tokens for writing data.

    #### User sessions with authorizations

-
    If a user signs in with username and password, creating a _user session_,
-
-    the session carries the permissions granted by all the user's
-    authorizations.
-
-    To create a user session, use the [`POST
-    /api/v2/signin`](#operation/PostSignin) endpoint.
-
+    the session carries the permissions granted by all the user's authorizations.
+    To create a user session, use the [`POST /api/v2/signin` endpoint](#operation/PostSignin).

    ### Related endpoints
-
-    [Signin](#tag/Signin)
-
-    [Signout](#tag/Signout)
-
    ### Related guides
-
-    [Authorize API requests](/influxdb/v2.3/api-guide/api_intro/#authentication).
-
-    [Manage API tokens](/influxdb/v2.3/security/tokens/).
-
-    [Assign a token to a specific user](/influxdb/v2.3/security/tokens/create-token/).
+    - [Authorize API requests](/influxdb/v2.3/api-guide/api_intro/#authentication)
+    - [Manage API tokens](/influxdb/v2.3/security/tokens/)
+    - [Assign a token to a specific user](/influxdb/v2.3/security/tokens/create-token/)
    name: Authorizations
  - name: Backup
-  - description: >
-      Store your data in InfluxDB
-      [buckets](/influxdb/v2.3/reference/glossary/#bucket).
-
+  - description: |
+      Store your data in InfluxDB [buckets](/influxdb/v2.3/reference/glossary/#bucket).
      A bucket is a named location where time series data is stored. All buckets
-
-      have a [retention
-      period](/influxdb/v2.3/reference/glossary/#retention-period),
-
+      have a [retention period](/influxdb/v2.3/reference/glossary/#retention-period),
      a duration of time that each data point persists. InfluxDB drops all
-
      points with timestamps older than the bucket’s retention period.
-
      A bucket belongs to an organization.
-
      ### Related guides
-
-      [Manage buckets](/influxdb/v2.3/organizations/buckets/)
+
+      - [Manage buckets](/influxdb/v2.3/organizations/buckets/)
    name: Buckets
  - name: Cells
  - name: Checks
-  - description: >
-      Many InfluxDB API endpoints require parameters to specify resources--for
-      example,
-
+  - description: |
+      Many InfluxDB API endpoints require parameters to specify resources--for example,
      writing to a **bucket** in an **organization**.
-
      ### Common query parameters
-
-      | Query parameter          | Value type            |
-      Description                                 |
-
-      |:------------------------ |:---------------------
-      |:-------------------------------------------|
-
-      | `bucket`                 | string                | The bucket name or ID
-      ([find your bucket](/influxdb/v2.3/organizations/buckets/view-buckets/). |
-
-      | `bucketID`               | string                | The bucket ID ([find
-      your bucket](/influxdb/v2.3/organizations/buckets/view-buckets/). |
-
-      | `org`                    | string                | The organization name
-      or ID ([find your organization](/influxdb/v2.3/organizations/view-orgs/).
-      |
-
-      | `orgID`                  | 16-byte string        | The organization ID
-      ([find your organization](/influxdb/v2.3/organizations/view-orgs/). |
+      | Query parameter          | Value type            | Description                                 |
+      |:------------------------ |:--------------------- |:-------------------------------------------|
+      | `bucket`                 | string                | The bucket name or ID ([find your bucket](/influxdb/v2.3/organizations/buckets/view-buckets/)). |
+      | `bucketID`               | string                | The bucket ID ([find your bucket](/influxdb/v2.3/organizations/buckets/view-buckets/)).
|
+      | `org`                    | string                | The organization name or ID ([find your organization](/influxdb/v2.3/organizations/view-orgs/)). |
+      | `orgID`                  | 16-byte string        | The organization ID ([find your organization](/influxdb/v2.3/organizations/view-orgs/)). |
    name: Common parameters
    x-traitTag: true
  - name: Config
  - name: Dashboards
  - name: Data I/O endpoints
  - name: DBRPs
-  - description: >
+  - description: |
      Generate profiling and trace reports.
-
      Use routes under `/debug/pprof` to analyze the Go runtime of InfluxDB.
-
-      These endpoints generate [Go runtime
-      profiles](https://pkg.go.dev/runtime/pprof)
-
+      These endpoints generate [Go runtime profiles](https://pkg.go.dev/runtime/pprof)
      and **trace** reports.
-
      **Profiles** are collections of stack traces that show call sequences
-
      leading to instances of a particular event, such as allocation.
-
      For more information about **pprof profile** and **trace** reports,
-
      see the following resources:
-
-      [Google pprof tool](https://github.com/google/pprof)
-
-      [Golang diagnostics](https://go.dev/doc/diagnostics)
+
+      - [Google pprof tool](https://github.com/google/pprof)
+      - [Golang diagnostics](https://go.dev/doc/diagnostics)
    name: Debug
  - description: |
      Delete data from an InfluxDB bucket.
    name: Delete
-  - description: >
+  - description: |
      InfluxDB API endpoints use standard HTTP request and response headers.
-
      **Note**: Not all operations support all headers.
-
      ### Request headers
-
-      | Header                   | Value type            |
-      Description                                 |
-
-      |:------------------------ |:---------------------
-      |:-------------------------------------------|
-
-      | `Accept`                 | string                | The content type that
-      the client can understand.                  |
-
-      | `Authorization`          | string                | The authorization
-      scheme and credential.                      |
-
-      | `Content-Encoding`       | string                | The compression
-      applied to the line protocol in the request payload. |
-
-      | `Content-Length`         | integer               | The size of the
-      entity-body, in bytes, sent to the database. |
-
-      | `Content-Type`           | string                | The format of the
-      data in the request body.                    |
+      | Header                   | Value type            | Description                                 |
+      |:------------------------ |:--------------------- |:-------------------------------------------|
+      | `Accept`                 | string                | The content type that the client can understand. |
+      | `Authorization`          | string                | The authorization scheme and credential. |
+      | `Content-Encoding`       | string                | The compression applied to the line protocol in the request payload. |
+      | `Content-Length`         | integer               | The size of the entity-body, in bytes, sent to the database. |
+      | `Content-Type`           | string                | The format of the data in the request body. |
    name: Headers
    x-traitTag: true
  - name: Health
@@ -20073,101 +19001,50 @@ tags:
  - name: Metrics
  - name: NotificationEndpoints
  - name: NotificationRules
-  - description: >
-      Create and manage your
-      [organizations](/influxdb/v2.3/reference/glossary/#organization).
-
+  - description: |
+      Create and manage your [organizations](/influxdb/v2.3/reference/glossary/#organization).
      An organization is a workspace for a group of users. Organizations can be
-
      used to separate different environments, projects, teams or users within
-
      InfluxDB.
-
-      Use the `/api/v2/orgs` endpoints to create, view, and manage
-      organizations.
+      Use the `/api/v2/orgs` endpoints to create, view, and manage organizations.
    name: Organizations
  - name: Ping
  - description: |
      Retrieve data, analyze queries, and get query suggestions.
name: Query - - description: > + - description: | See the [**API Quick Start**](/influxdb/v2.3/api-guide/api_intro/) + to get up and running authenticating with tokens, writing to buckets, and querying data. - to get up and running authenticating with tokens, writing to buckets, and - querying data. - - - [**InfluxDB API client - libraries**](/influxdb/v2.3/api-guide/client-libraries/) - - are available for popular languages and ready to import into your - application. + [**InfluxDB API client libraries**](/influxdb/v2.3/api-guide/client-libraries/) + are available for popular languages and ready to import into your application. name: Quick start x-traitTag: true - name: Ready - name: RemoteConnections - name: Replications - name: Resources - - description: > - InfluxDB API endpoints use standard HTTP status codes for success and - failure responses. - + - description: | + InfluxDB API endpoints use standard HTTP status codes for success and failure responses. The response body may include additional details. - For details about a specific operation's response, - see **Responses** and **Response Samples** for that operation. - API operations may return the following HTTP status codes: - |  Code  | Status | Description | - |:-----------:|:------------------------ |:--------------------- | - | `200` | Success | | - - | `204` | No content | For a `POST` request, `204` - indicates that InfluxDB accepted the request and request data is valid. - Asynchronous operations, such as `write`, might not have completed yet. | - - | `400` | Bad request | May indicate one of the - following:
  • Line protocol is malformed. The response body contains - the first malformed line in the data and indicates what was expected. For - partial writes, the number of points written and the number of points - rejected are also included. For more information, check the - `rejected_points` measurement in your `_monitoring` - bucket.
  • `Authorization` header is missing or malformed or the API - token doesn't have permission for the operation.
| - - | `401` | Unauthorized | May indicate one of the - following:
  • `Authorization: Token` header is missing or - malformed
  • API token value is missing from the header
  • API - token doesn't have permission. For more information about token types and - permissions, see [Manage API - tokens](/influxdb/latest/security/tokens/)
| - - | `404` | Not found | Requested resource was not - found. `message` in the response body provides details about the requested - resource. | - - | `413` | Request entity too large | Request payload exceeds the - size limit. | - - | `422` | Unprocessable entity | Request data is invalid. `code` - and `message` in the response body provide details about the problem. | - - | `429` | Too many requests | API token is temporarily over - the request quota. The `Retry-After` header describes when to try the - request again. | - + | `204` | No content | For a `POST` request, `204` indicates that InfluxDB accepted the request and request data is valid. Asynchronous operations, such as `write`, might not have completed yet. | + | `400` | Bad request | May indicate one of the following:
  • Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. For more information, check the `rejected_points` measurement in your `_monitoring` bucket.
  • `Authorization` header is missing or malformed or the API token doesn't have permission for the operation.
| + | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/latest/security/tokens/)
| + | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | + | `413` | Request entity too large | Request payload exceeds the size limit. | + | `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. | + | `429` | Too many requests | API token is temporarily over the request quota. The `Retry-After` header describes when to try the request again. | | `500` | Internal server error | | - - | `503` | Service unavailable | Server is temporarily - unavailable to process the request. The `Retry-After` header describes - when to try the request again. | + | `503` | Service unavailable | Server is temporarily unavailable to process the request. The `Retry-After` header describes when to try the request again. | name: Response codes x-traitTag: true - name: Restore @@ -20181,99 +19058,60 @@ tags: - name: Signout - name: Sources - name: System information endpoints - - description: > + - description: | Process and analyze your data with tasks in the InfluxDB task engine. + With tasks, you can schedule Flux scripts to query, analyze, modify, and act on data. - With tasks, you can schedule Flux scripts to query, analyze, modify, and - act on data. - - - Use the `/api/v2/tasks` endpoints to create and manage tasks, retry task - runs, and retrieve run logs. - + Use the `/api/v2/tasks` endpoints to create and manage tasks, retry task runs, and retrieve run logs. #### Related guides - - [Get started with tasks](/influxdb/v2.3/process-data/get-started/) - - - [Common data processing - tasks](/influxdb/v2.3/process-data/common-tasks/) + - [Common data processing tasks](/influxdb/v2.3/process-data/common-tasks/) name: Tasks - name: Telegraf Plugins - name: Telegrafs - - description: > + - description: | Export and apply InfluxDB **templates**. - Manage **stacks** of templated InfluxDB resources. - InfluxDB templates are prepackaged configurations for - everything from dashboards and Telegraf to notifications and alerts. - Use InfluxDB templates to quickly configure a fresh instance of InfluxDB, - back up your dashboard configuration, or share your configuration with the - InfluxData community. - - Use the `/api/v2/templates` endpoints to export templates and apply - templates. - + Use the `/api/v2/templates` endpoints to export templates and apply templates. **InfluxDB stacks** are stateful InfluxDB templates that let you - - add, update, and remove installed template resources over time, avoid - duplicating - + add, update, and remove installed template resources over time, avoid duplicating resources when applying the same or similar templates more than once, and - apply changes to distributed instances of InfluxDB OSS or InfluxDB Cloud. - Use the `/api/v2/stacks` endpoints to manage installed template resources. - #### Related guides - - [InfluxDB stacks](/influxdb/v2.3/influxdb-templates/stacks/) - - [InfluxDB templates](/influxdb/v2.3/influxdb-templates/) name: Templates - - description: > + - description: | Manage users for your organization. - Users are those with access to InfluxDB. - To grant a user permission to access data, add them as a member of an - organization and provide them with an API token. - #### User sessions with authorizations - Optionally, you can scope an authorization (and its API token) to a user. 
- If a user signs in with username and password, creating a _user session_, - - the session carries the permissions granted by all the user's - authorizations. - - To create a user session, use the [`POST - /api/v2/signin`](#operation/PostSignin) endpoint. - + the session carries the permissions granted by all the user's authorizations. + To create a user session, use the [`POST /api/v2/signin` endpoint](#operation/PostSignin). #### Related guides - - - [Manage users](/influxdb/v2.3/influxdb/latest/users/). - - - [Create a token scoped to a - user](/influxdb/v2.3/latest/security/tokens/create-token/#create-a-token-scoped-to-a-user). + - [Manage users](/influxdb/v2.3/influxdb/latest/users/) + - [Create a token scoped to a user](/influxdb/v2.3/latest/security/tokens/create-token/#create-a-token-scoped-to-a-user) name: Users - name: Variables - name: Views diff --git a/api-docs/v2.4/swaggerV1Compat.yml b/api-docs/v2.4/swaggerV1Compat.yml index 1238df657..ea2970e92 100644 --- a/api-docs/v2.4/swaggerV1Compat.yml +++ b/api-docs/v2.4/swaggerV1Compat.yml @@ -2,20 +2,13 @@ openapi: 3.0.0 info: title: InfluxDB OSS v1 compatibility API documentation version: 2.4.0 v1 compatibility - description: > - The InfluxDB 1.x compatibility /write and /query endpoints work with - InfluxDB 1.x client libraries and third-party integrations like Grafana and - others. - - - If you want to use the latest InfluxDB /api/v2 API instead, see the - [InfluxDB v2 API documentation](/influxdb/v2.4/api/). + description: | + The InfluxDB 1.x compatibility /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. + If you want to use the latest InfluxDB /api/v2 API instead, see the [InfluxDB v2 API documentation](/influxdb/v2.4/api/). This documentation is generated from the - - [InfluxDB OpenAPI - specification](https://github.com/influxdata/openapi/blob/influxdb-oss-v2.4.0/contracts/swaggerV1Compat.yml). + [InfluxDB OpenAPI specification](https://github.com/influxdata/openapi/blob/influxdb-oss-v2.4.0/contracts/swaggerV1Compat.yml). servers: - url: / paths: @@ -41,9 +34,7 @@ paths: schema: type: string required: true - description: >- - Bucket to write to. If none exists, InfluxDB creates a bucket with a - default 3-day retention policy. + description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. - in: query name: rp schema: @@ -56,36 +47,25 @@ paths: description: Write precision. - in: header name: Content-Encoding - description: >- - When present, its value indicates to the database that compression - is applied to the line protocol body. + description: When present, its value indicates to the database that compression is applied to the line protocol body. schema: type: string - description: >- - Specifies that the line protocol in the body is encoded with gzip - or not encoded with identity. + description: Specifies that the line protocol in the body is encoded with gzip or not encoded with identity. default: identity enum: - gzip - identity responses: '204': - description: >- - Write data is correctly formatted and accepted for writing to the - bucket. + description: Write data is correctly formatted and accepted for writing to the bucket. '400': - description: >- - Line protocol poorly formed and no points were written. Response - can be used to determine the first malformed line in the body - line-protocol. All data in body was rejected and not written. 
+ description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. content: application/json: schema: $ref: '#/components/schemas/LineProtocolError' '401': - description: >- - Token does not have sufficient permissions to write to this - organization and bucket or the organization and bucket do not exist. + description: Token does not have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist. content: application/json: schema: @@ -97,35 +77,24 @@ paths: schema: $ref: '#/components/schemas/Error' '413': - description: >- - Write has been rejected because the payload is too large. Error - message returns max size supported. All data in body was rejected - and not written. + description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. content: application/json: schema: $ref: '#/components/schemas/LineProtocolLengthError' '429': - description: >- - Token is temporarily over quota. The Retry-After header describes - when to try the write again. + description: Token is temporarily over quota. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: type: integer format: int32 '503': - description: >- - Server is temporarily unavailable to accept writes. The Retry-After - header describes when to try the write again. + description: Server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: type: integer format: int32 @@ -155,10 +124,7 @@ paths: name: Accept schema: type: string - description: >- - Specifies how query results should be encoded in the response. - **Note:** With `application/csv`, query results include epoch - timestamps instead of RFC3339 timestamps. + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. default: application/json enum: - application/json @@ -167,15 +133,10 @@ paths: - application/x-msgpack - in: header name: Accept-Encoding - description: >- - The Accept-Encoding request HTTP header advertises which content - encoding, usually a compression algorithm, the client is able to - understand. + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. schema: type: string - description: >- - Specifies that the query response in the body should be encoded - with gzip or not encoded with identity. + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. 
default: identity enum: - gzip @@ -207,23 +168,16 @@ paths: description: Query results headers: Content-Encoding: - description: >- - The Content-Encoding entity header is used to compress the - media-type. When present, its value indicates which encodings - were applied to the entity-body + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body schema: type: string - description: >- - Specifies that the response in the body is encoded with gzip - or not encoded with identity. + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. default: identity enum: - gzip - identity Trace-Id: - description: >- - The Trace-Id header reports the request's trace ID, if one was - generated. + description: The Trace-Id header reports the request's trace ID, if one was generated. schema: type: string description: Specifies the request's trace ID. @@ -242,14 +196,10 @@ paths: type: string format: binary '429': - description: >- - Token is temporarily over quota. The Retry-After header describes - when to try the read again. + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. headers: Retry-After: - description: >- - A non-negative decimal integer indicating the seconds to delay - after the response is received. + description: A non-negative decimal integer indicating the seconds to delay after the response is received. schema: type: integer format: int32 @@ -330,11 +280,8 @@ components: items: {} InfluxQLCSVResponse: type: string - example: > - name,tags,time,test_field,test_tag - test_measurement,,1603740794286107366,1,tag_value - test_measurement,,1603740870053205649,2,tag_value - test_measurement,,1603741221085428881,3,tag_value + example: | + name,tags,time,test_field,test_tag test_measurement,,1603740794286107366,1,tag_value test_measurement,,1603740870053205649,2,tag_value test_measurement,,1603741221085428881,3,tag_value Error: properties: code: @@ -379,15 +326,11 @@ components: type: string op: readOnly: true - description: >- - Op describes the logical code operation during error. Useful for - debugging. + description: Op describes the logical code operation during error. Useful for debugging. type: string err: readOnly: true - description: >- - Err is a stack of errors that occurred during processing of the - request. Useful for debugging. + description: Err is a stack of errors that occurred during processing of the request. Useful for debugging. type: string line: readOnly: true @@ -425,30 +368,21 @@ components: type: apiKey name: Authorization in: header - description: > - Use the [Token - authentication](#section/Authentication/TokenAuthentication) - + description: | + Use the [Token authentication](#section/Authentication/TokenAuthentication) scheme to authenticate to the InfluxDB API. - In your API requests, send an `Authorization` header. - - For the header value, provide the word `Token` followed by a space and - an InfluxDB API token. - + For the header value, provide the word `Token` followed by a space and an InfluxDB API token. The word `Token` is case-sensitive. - ### Syntax - `Authorization: Token YOUR_INFLUX_TOKEN` - For examples and more information, see the following: - [`/authorizations`](#tag/Authorizations) endpoint. - [Authorize API requests](/influxdb/cloud/api-guide/api_intro/#authentication). 
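      To make the Token scheme concrete, here is a minimal sketch using the Flux `http/requests` package; the host and token value are placeholders:

      ```js
      import "http/requests"

      // Send the Authorization header described above and inspect the response (sketch)
      response = requests.get(
          url: "http://localhost:8086/api/v2/buckets",
          headers: ["Authorization": "Token MY_API_TOKEN"],
      )

      requests.peek(response: response)
      ```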
@@ -456,54 +390,37 @@ components: BasicAuthentication: type: http scheme: basic - description: > - Use the HTTP [Basic - authentication](#section/Authentication/BasicAuthentication) - - scheme with clients that support the InfluxDB 1.x convention of username - and password (that don't support the `Authorization: Token` scheme): + description: | + Use the HTTP [Basic authentication](#section/Authentication/BasicAuthentication) + scheme with clients that support the InfluxDB 1.x convention of username and password (that don't support the `Authorization: Token` scheme): - - For examples and more information, see how to [authenticate with a - username and password](/influxdb/cloud/reference/api/influxdb-1x/). + For examples and more information, see how to [authenticate with a username and password](/influxdb/cloud/reference/api/influxdb-1x/). QuerystringAuthentication: type: apiKey in: query name: u=&p= - description: > - Use the [Querystring - authentication](#section/Authentication/QuerystringAuthentication) - - scheme with InfluxDB 1.x API parameters to provide credentials through - the query string. + description: | + Use the [Querystring authentication](#section/Authentication/QuerystringAuthentication) + scheme with InfluxDB 1.x API parameters to provide credentials through the query string. - - For examples and more information, see how to [authenticate with a - username and password](/influxdb/cloud/reference/api/influxdb-1x/). + For examples and more information, see how to [authenticate with a username and password](/influxdb/cloud/reference/api/influxdb-1x/). security: - TokenAuthentication: [] - BasicAuthentication: [] - QuerystringAuthentication: [] tags: - name: Authentication - description: > + description: | The InfluxDB 1.x API requires authentication for all requests. - InfluxDB Cloud uses InfluxDB API tokens to authenticate requests. 
- For more information, see the following: - - [Token authentication](#section/Authentication/TokenAuthentication) - - [Basic authentication](#section/Authentication/BasicAuthentication) - - - [Querystring - authentication](#section/Authentication/QuerystringAuthentication) - + - [Querystring authentication](#section/Authentication/QuerystringAuthentication) x-traitTag: true From c4c7cc6d31215b9cd3237a5aa135c5035d0d0e50 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Wed, 21 Sep 2022 08:40:30 -0600 Subject: [PATCH 09/17] Flux 0.184 (#4471) * added flux 0.184 placeholder release notes * applied updates to the Flux spec * Flux 0.184 release notes * Flux 0.184 stdlib updates --- content/flux/v0.x/release-notes.md | 20 ++ content/flux/v0.x/spec/_index.md | 6 - content/flux/v0.x/spec/data-model.md | 16 +- content/flux/v0.x/spec/expressions.md | 11 ++ content/flux/v0.x/spec/lexical-elements.md | 134 +++++-------- content/flux/v0.x/spec/options.md | 8 +- content/flux/v0.x/spec/statements.md | 6 - content/flux/v0.x/spec/system-built-ins.md | 6 +- content/flux/v0.x/spec/types.md | 29 +-- .../v0.x/stdlib/experimental/array/tobool.md | 76 +++++++ .../stdlib/experimental/array/toduration.md | 69 +++++++ .../v0.x/stdlib/experimental/array/tofloat.md | 86 ++++++++ .../v0.x/stdlib/experimental/array/toint.md | 74 +++++++ .../stdlib/experimental/array/tostring.md | 72 +++++++ .../v0.x/stdlib/experimental/array/totime.md | 70 +++++++ .../v0.x/stdlib/experimental/array/touint.md | 74 +++++++ .../v0.x/stdlib/experimental/geo/astracks.md | 49 ++++- .../stdlib/experimental/http/requests/get.md | 2 +- .../stdlib/experimental/http/requests/peek.md | 19 +- .../v0.x/stdlib/experimental/polyline/rdp.md | 186 +++++++++--------- content/flux/v0.x/stdlib/http/requests/get.md | 2 +- .../flux/v0.x/stdlib/http/requests/peek.md | 19 +- .../flux/v0.x/stdlib/http/requests/post.md | 8 +- .../v0.x/stdlib/universe/aggregatewindow.md | 2 +- content/flux/v0.x/stdlib/universe/bool.md | 2 +- content/flux/v0.x/stdlib/universe/bottom.md | 2 +- content/flux/v0.x/stdlib/universe/bytes.md | 2 +- content/flux/v0.x/stdlib/universe/contains.md | 2 +- content/flux/v0.x/stdlib/universe/cov.md | 2 +- content/flux/v0.x/stdlib/universe/display.md | 2 +- .../flux/v0.x/stdlib/universe/doubleema.md | 2 +- content/flux/v0.x/stdlib/universe/duration.md | 2 +- content/flux/v0.x/stdlib/universe/filter.md | 6 +- .../flux/v0.x/stdlib/universe/findcolumn.md | 2 +- .../flux/v0.x/stdlib/universe/findrecord.md | 2 +- content/flux/v0.x/stdlib/universe/float.md | 2 +- .../flux/v0.x/stdlib/universe/getcolumn.md | 2 +- .../flux/v0.x/stdlib/universe/getrecord.md | 2 +- .../v0.x/stdlib/universe/highestaverage.md | 2 +- .../v0.x/stdlib/universe/highestcurrent.md | 2 +- .../flux/v0.x/stdlib/universe/highestmax.md | 2 +- content/flux/v0.x/stdlib/universe/increase.md | 2 +- content/flux/v0.x/stdlib/universe/int.md | 2 +- .../flux/v0.x/stdlib/universe/kaufmanser.md | 2 +- content/flux/v0.x/stdlib/universe/length.md | 2 +- .../flux/v0.x/stdlib/universe/linearbins.md | 2 +- .../v0.x/stdlib/universe/logarithmicbins.md | 2 +- .../v0.x/stdlib/universe/lowestaverage.md | 2 +- .../v0.x/stdlib/universe/lowestcurrent.md | 2 +- .../flux/v0.x/stdlib/universe/lowestmin.md | 2 +- content/flux/v0.x/stdlib/universe/max.md | 2 +- content/flux/v0.x/stdlib/universe/median.md | 2 +- content/flux/v0.x/stdlib/universe/pearsonr.md | 2 +- content/flux/v0.x/stdlib/universe/range.md | 2 +- content/flux/v0.x/stdlib/universe/rename.md | 72 ++++++- 
content/flux/v0.x/stdlib/universe/sample.md   |   2 +-
 content/flux/v0.x/stdlib/universe/set.md      |   2 +-
 content/flux/v0.x/stdlib/universe/skew.md     |   2 +-
 content/flux/v0.x/stdlib/universe/sort.md     |   4 +-
 content/flux/v0.x/stdlib/universe/spread.md   |   2 +-
 .../flux/v0.x/stdlib/universe/statecount.md   |   2 +-
 .../v0.x/stdlib/universe/stateduration.md     |   2 +-
 .../v0.x/stdlib/universe/statetracking.md     |   2 +-
 content/flux/v0.x/stdlib/universe/stddev.md   |   2 +-
 content/flux/v0.x/stdlib/universe/string.md   |   2 +-
 content/flux/v0.x/stdlib/universe/sum.md      |   2 +-
 .../flux/v0.x/stdlib/universe/tablefind.md    |   2 +-
 content/flux/v0.x/stdlib/universe/tail.md     |   2 +-
 content/flux/v0.x/stdlib/universe/time.md     |   2 +-
 .../stdlib/universe/timedmovingaverage.md     |   2 +-
 .../flux/v0.x/stdlib/universe/timeshift.md    |   2 +-
 .../v0.x/stdlib/universe/timeweightedavg.md   |   2 +-
 content/flux/v0.x/stdlib/universe/tobool.md   |   2 +-
 content/flux/v0.x/stdlib/universe/today.md    |   2 +-
 content/flux/v0.x/stdlib/universe/tofloat.md  |   2 +-
 content/flux/v0.x/stdlib/universe/toint.md    |   2 +-
 content/flux/v0.x/stdlib/universe/top.md      |   2 +-
 content/flux/v0.x/stdlib/universe/tostring.md |   2 +-
 content/flux/v0.x/stdlib/universe/totime.md   |   2 +-
 content/flux/v0.x/stdlib/universe/touint.md   |   2 +-
 .../flux/v0.x/stdlib/universe/tripleema.md    |   2 +-
 .../universe/tripleexponentialderivative.md   |   2 +-
 .../stdlib/universe/truncatetimecolumn.md     |   2 +-
 content/flux/v0.x/stdlib/universe/uint.md     |   2 +-
 content/flux/v0.x/stdlib/universe/union.md    |   2 +-
 content/flux/v0.x/stdlib/universe/unique.md   |   2 +-
 content/flux/v0.x/stdlib/universe/window.md   |   2 +-
 content/flux/v0.x/stdlib/universe/yield.md    |   2 +-
 88 files changed, 918 insertions(+), 330 deletions(-)
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/tobool.md
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/toduration.md
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/tofloat.md
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/toint.md
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/tostring.md
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/totime.md
 create mode 100644 content/flux/v0.x/stdlib/experimental/array/touint.md

diff --git a/content/flux/v0.x/release-notes.md b/content/flux/v0.x/release-notes.md
index 1dc3d7332..b99d68f37 100644
--- a/content/flux/v0.x/release-notes.md
+++ b/content/flux/v0.x/release-notes.md
@@ -10,6 +10,26 @@ aliases:
   - /influxdb/cloud/reference/release-notes/flux/
 ---
 
+## v0.184.0 [2022-09-21]
+
+### Breaking changes
+- Update logical _null_ handling and align all logical operator implementations
+(vectorized, row-based, as well as "in the interpreter") to be consistent and
+representative of the Flux SPEC.
+
+### Features
+- Add array type conversion functions to the
+  [experimental `array` package](/flux/v0.x/stdlib/experimental/array/).
+
+### Bug fixes
+- Update SPEC and fix some inconsistencies.
+- Update `sort limit` to skip chunks with no rows.
+- Don't report an error about testcases in the LSP.
+- Prevent the metadata map from being concurrently mutated.
+- Don't stackoverflow on deeply nested expressions.
+
+---
+
 ## v0.183.0 [2022-09-12]
 
 ### Features
diff --git a/content/flux/v0.x/spec/_index.md b/content/flux/v0.x/spec/_index.md
index 58359f3ae..44677c85f 100644
--- a/content/flux/v0.x/spec/_index.md
+++ b/content/flux/v0.x/spec/_index.md
@@ -16,12 +16,6 @@ flux/v0.x/tags: [flux]
 
 The following documents specify the Flux language and query execution.
-{{% note %}}
-This document is a living document and may not represent the current implementation of Flux.
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
 {{< children >}}
 
 {{< page-nav next="/flux/v0.x/spec/notation/" >}}
diff --git a/content/flux/v0.x/spec/data-model.md b/content/flux/v0.x/spec/data-model.md
index 4884a12d1..0bc53af7b 100644
--- a/content/flux/v0.x/spec/data-model.md
+++ b/content/flux/v0.x/spec/data-model.md
@@ -14,12 +14,6 @@ aliases:
   - /influxdb/cloud/reference/flux/language/data-model/
 ---
 
-{{% note %}}
-This document is a living document and may not represent the current implementation of Flux.
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
 Flux employs a basic data model built from basic data types.
 The data model consists of tables, records, columns and streams.
@@ -54,20 +48,14 @@ These common values are referred to as the "group key value" and can be represen
 
 A table's schema consists of its group key and its columns' labels and types.
 
-{{% note %}}
-[IMPL#463](https://github.com/influxdata/flux/issues/463) Specify the primitive types that make up stream and table types
-{{% /note %}}
-
 ## Stream of tables
 
 A **stream** represents a potentially unbounded set of tables.
 A stream is grouped into individual tables using their respective group keys.
 Tables within a stream each have a unique group key value.
 
-{{% note %}}
-[IMPL#463](https://github.com/influxdata/flux/issues/463) Specify the primitive
-types that make up stream and table types
-{{% /note %}}
+A stream is represented using the stream type `stream[A] where A: Record`.
+The group key is not explicitly modeled in the Flux type system.
 
 ## Missing values (null)
 
diff --git a/content/flux/v0.x/spec/expressions.md b/content/flux/v0.x/spec/expressions.md
index 881af38c0..d9aa89704 100644
--- a/content/flux/v0.x/spec/expressions.md
+++ b/content/flux/v0.x/spec/expressions.md
@@ -131,6 +131,17 @@ mul = (a,b) => a * b
 Function literals are _closures_ and may refer to variables defined in a surrounding block.
 Those variables are shared between the function literal and the surrounding block.
 
+Function arguments are named. There are no positional arguments.
+Values implementing a function type must use the same argument names.
+
+```js
+apply = (f, x) => f(x: x)
+
+apply(f: (x) => x + 1, x: 2) // 3
+apply(f: (a) => a + 1, x: 2) // error, function must use the same argument name `x`.
+apply(f: (x, a=3) => a + x, x: 2) // 5, extra default arguments are allowed
+```
+
 ## Call expressions
 
 A _call expression_ invokes a function with the provided arguments.
diff --git a/content/flux/v0.x/spec/lexical-elements.md b/content/flux/v0.x/spec/lexical-elements.md
index be90705c1..0cc676c2c 100644
--- a/content/flux/v0.x/spec/lexical-elements.md
+++ b/content/flux/v0.x/spec/lexical-elements.md
@@ -11,12 +11,6 @@ aliases:
   - /influxdb/cloud/reference/flux/language/lexical-elements/
 ---
 
-{{% note %}}
-This document is a living document and may not represent the current implementation of Flux.
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
 ## Comments
 
 Comments serve as documentation.
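For instance (an illustrative sketch, not part of the diff above):

```js
// A line comment runs from "//" to the end of the line
x = 1 // a comment may also follow code on the same line
```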
@@ -63,15 +57,11 @@
 The following keywords are reserved and may not be used as identifiers:
 
 ```
-and    import  not  return   option   test
-empty  in      or   package  builtin
+and import option if
+or package builtin then
+not return testcase else exists
 ```
 
-{{% note %}}
-[IMPL#764](https://github.com/influxdata/flux/issues/764) Add `in` and `empty` operator support.
-{{% /note %}}
-
-
 ## Operators
 
 The following character sequences represent operators:
@@ -79,26 +69,11 @@ The following character sequences represent operators:
 ```
 +   ==   !=   (   )   =>
 -   <    !~   [   ]   ^
-*   >    =~   {   }
-/   <=   =    ,   :
+*   >    =~   {   }   ?
+/   <=   =    ,   :   "
 %   >=   <-   .   |>
 ```
 
-## Numeric literals
-
-Numeric literals may be integers or floating point values.
-Literals have arbitrary precision and are coerced to a specific type when used.
-
-The following coercion rules apply to numeric literals:
-
-* An integer literal can be coerced to an "int", "uint", or "float" type,
-* A float literal can be coerced to a "float" type.
-* An error will occur if the coerced type cannot represent the literal value.
-
-{{% note %}}
-[IMPL#476](https://github.com/influxdata/flux/issues/476) Allow numeric literal coercion.
-{{% /note %}}
-
 ### Integer literals
 
 An integer literal is a sequence of digits representing an integer value.
@@ -196,63 +171,55 @@ These operations are performed on each time unit independently.
 5w
 1mo5d // 1 month and 5 days
 -1mo5d // negative 1 month and 5 days
-5w * 2 // 10 weeks
 ```
 
-Durations can be added to date times to produce a new date time.
-Addition and subtraction of durations to date times do not commute and are left associative.
-Addition and subtraction of durations to date times applies months, days and seconds in that order.
+Durations can be added to date times to produce a new date time. 
+Addition and subtraction of durations to date times applies months and nanoseconds in that order.
 When months are added to a date time and the resulting date is past the end of the month, the day is rolled back to the last day of the month.
+Of note is that addition and subtraction of durations to date times does not commute.
 
 ##### Examples of duration literals
 
 ```js
-2018-01-01T00:00:00Z + 1d // 2018-01-02T00:00:00Z
-2018-01-01T00:00:00Z + 1mo // 2018-02-01T00:00:00Z
-2018-01-01T00:00:00Z + 2mo // 2018-03-01T00:00:00Z
-2018-01-31T00:00:00Z + 2mo // 2018-03-31T00:00:00Z
-2018-02-28T00:00:00Z + 2mo // 2018-04-28T00:00:00Z
-2018-01-31T00:00:00Z + 1mo // 2018-02-28T00:00:00Z, February 31th is rolled back to the last day of the month, February 28th in 2018.
+import "date"
 
+date.add(d: 1d, to: 2018-01-01T00:00:00Z) // 2018-01-02T00:00:00Z
+date.add(d: 1mo, to: 2018-01-01T00:00:00Z) // 2018-02-01T00:00:00Z
+date.add(d: 2mo, to: 2018-01-01T00:00:00Z) // 2018-03-01T00:00:00Z
+date.add(d: 2mo, to: 2018-01-31T00:00:00Z) // 2018-03-31T00:00:00Z
+date.add(d: 2mo, to: 2018-02-28T00:00:00Z) // 2018-04-28T00:00:00Z
+date.add(d: 1mo, to: 2018-01-31T00:00:00Z) // 2018-02-28T00:00:00Z, February 31st is rolled back to the last day of the month, February 28th in 2018.
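+// Additional illustration (not in the original examples): month arithmetic
+// rolls end-of-month dates back to the last valid day of the resulting month.
+date.add(d: 1mo, to: 2018-03-31T00:00:00Z) // 2018-04-30T00:00:00Z, April 31st is rolled back to April 30th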
-// Addition and subtraction of durations to date times applies months, days and seconds in that order. -2018-01-28T00:00:00Z + 1mo + 2d // 2018-03-02T00:00:00Z -2018-01-28T00:00:00Z + 1mo2d // 2018-03-02T00:00:00Z -2018-01-28T00:00:00Z + 2d + 1mo // 2018-02-28T00:00:00Z, explicit left associative add of 2d first changes the result -2018-02-01T00:00:00Z + 2mo2d // 2018-04-03T00:00:00Z -2018-01-01T00:00:00Z + 1mo30d // 2018-03-02T00:00:00Z, Months are applied first to get February 1st, then days are added resulting in March 2 in 2018. -2018-01-31T00:00:00Z + 1mo1d // 2018-03-01T00:00:00Z, Months are applied first to get February 28th, then days are added resulting in March 1 in 2018. +date.add(d: 1d, to: date.add(d: 1mo, to: 2018-02-28T00:00:00Z)) // 2018-03-29T00:00:00Z +date.add(d: 1mo, to: date.add(d: 1d, to: 2018-02-28T00:00:00Z)) // 2018-04-01T00:00:00Z +date.sub(d: 1d, from: date.add(d: 2mo, to: 2018-01-01T00:00:00Z)) // 2018-02-28T00:00:00Z +date.add(d: 3mo, to: date.sub(d: 1d, from: 2018-01-01T00:00:00Z)) // 2018-03-31T00:00:00Z +date.add(d: 1mo, to: date.add(d: 1mo, to: 2018-01-31T00:00:00Z)) // 2018-03-28T00:00:00Z +date.add(d: 2mo, to: 2018-01-31T00:00:00Z) // 2018-03-31T00:00:00Z -// Multiplication works -2018-01-01T00:00:00Z + 1mo * 1 // 2018-02-01T00:00:00Z -2018-01-01T00:00:00Z + 1mo * 2 // 2018-03-01T00:00:00Z -2018-01-01T00:00:00Z + 1mo * 3 // 2018-04-01T00:00:00Z -2018-01-31T00:00:00Z + 1mo * 1 // 2018-02-28T00:00:00Z -2018-01-31T00:00:00Z + 1mo * 2 // 2018-03-31T00:00:00Z -2018-01-31T00:00:00Z + 1mo * 3 // 2018-04-30T00:00:00Z +// Addition and subtraction of durations to date times applies months and nanoseconds in that order. +date.add(d: 2d, to: date.add(d: 1mo, to: 2018-01-28T00:00:00Z)) // 2018-03-02T00:00:00Z +date.add(d: 1mo2d, to: 2018-01-28T00:00:00Z) // 2018-03-02T00:00:00Z +date.add(d: 1mo, to: date.add(d: 2d, to: 2018-01-28T00:00:00Z)) // 2018-02-28T00:00:00Z, explicit add of 2d first changes the result +date.add(d: 2mo2d, to: 2018-02-01T00:00:00Z) // 2018-04-03T00:00:00Z +date.add(d: 1mo30d, to: 2018-01-01T00:00:00Z) // 2018-03-03T00:00:00Z, Months are applied first to get February 1st, then days are added resulting in March 3 in 2018. +date.add(d: 1mo1d, to: 2018-01-31T00:00:00Z) // 2018-03-01T00:00:00Z, Months are applied first to get February 28th, then days are added resulting in March 1 in 2018. + +// Multiplication and addition of durations to date times +date.add(d: date.scale(d:1mo, n:1), to: 2018-01-01T00:00:00Z) // 2018-02-01T00:00:00Z +date.add(d: date.scale(d:1mo, n:2), to: 2018-01-01T00:00:00Z) // 2018-03-01T00:00:00Z +date.add(d: date.scale(d:1mo, n:3), to: 2018-01-01T00:00:00Z) // 2018-04-01T00:00:00Z +date.add(d: date.scale(d:1mo, n:1), to: 2018-01-31T00:00:00Z) // 2018-02-28T00:00:00Z +date.add(d: date.scale(d:1mo, n:2), to: 2018-01-31T00:00:00Z) // 2018-03-31T00:00:00Z +date.add(d: date.scale(d:1mo, n:3), to: 2018-01-31T00:00:00Z) // 2018-04-30T00:00:00Z ``` -{{% note %}} -[IMPL#413](https://github.com/influxdata/flux/issues/413) Implement Duration vectors. -{{% /note %}} - ## Date and time literals A _date and time literal_ represents a specific moment in time. It has a date part, a time part and a time offset part. The format follows the [RFC 3339](https://tools.ietf.org/html/rfc3339) specification. -The time is optional. -When it is omitted, the time is assumed to be midnight for the default location. -The `time_offset` is optional. -When it is omitted, the location option is used to determine the offset. +The time is optional. 
When it is omitted the time is assumed to be midnight UTC. ```js date_time_lit = date [ "T" time ] . @@ -260,7 +227,7 @@ date = year "-" month "-" day . year = decimal_digit decimal_digit decimal_digit decimal_digit . month = decimal_digit decimal_digit . day = decimal_digit decimal_digit . -time = hour ":" minute ":" second [ fractional_second ] [ time_offset ] . +time = hour ":" minute ":" second [ fractional_second ] time_offset . hour = decimal_digit decimal_digit . minute = decimal_digit decimal_digit . second = decimal_digit decimal_digit . @@ -273,14 +240,9 @@ time_offset = "Z" | ("+" | "-" ) hour ":" minute . ```js 1952-01-25T12:35:51Z 2018-08-15T13:36:23-07:00 -2009-10-15T09:00:00 // October 15th 2009 at 9 AM in the default location -2018-01-01 // midnight on January 1st 2018 in the default location +2018-01-01 // midnight on January 1st 2018 UTC ``` -{{% note %}} -[IMPL#152](https://github.com/influxdata/flux/issues/152) Implement shorthand time literals. -{{% /note %}} - ### String literals A _string literal_ represents a sequence of characters enclosed in double quotes. @@ -297,6 +259,7 @@ String literals support several escape sequences. ``` Additionally, any byte value may be specified via a hex encoding using `\x` as the prefix. +The hex encoding of values must result in a valid UTF-8 sequence. ``` string_lit = `"` { unicode_value | byte_value | StringExpression | newline } `"` . @@ -322,13 +285,8 @@ Embedded expressions are enclosed in a dollar sign and curly braces (`${}`). The expressions are evaluated in the scope containing the string literal. The result of an expression is formatted as a string and replaces the string content between the braces. All types are formatted as strings according to their literal representation. -A function `printf` exists to allow more precise control over formatting of various types. To include the literal `${` within a string, it must be escaped. -{{% note %}} -[IMPL#731](https://github.com/influxdata/flux/issues/731) Add printf function. -{{% /note %}} - ##### Example: Interpolation ```js @@ -344,16 +302,17 @@ A _regular expression literal_ represents a regular expression pattern, enclosed Within the forward slashes, any unicode character may appear except for an unescaped forward slash. The `\x` hex byte value representation from string literals may also be present. -Regular expression literals support only the following escape sequences: +In addition to standard escape sequences, regular expression literals also +support the following escape sequences: ``` \/ U+002f forward slash -\\ U+005c backslash ``` ``` -regexp_lit = "/" { unicode_char | byte_value | regexp_escape_char } "/" . -regexp_escape_char = `\` (`/` | `\`) +regexp_lit = "/" regexp_char { regexp_char } "/" . +regexp_char = unicode_char | byte_value | regexp_escape_char . +regexp_escape_char = `\/` ``` ##### Examples of regular expression literals @@ -363,7 +322,8 @@ regexp_escape_char = `\` (`/` | `\`) /http:\/\/localhost:8086/ /^\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e(ZZ)?$/ /^日本語(ZZ)?$/ // the above two lines are equivalent -/\\xZZ/ // this becomes the literal pattern "\xZZ" +/a\/b\s\w/ // escape sequences and character class shortcuts are supported +/(?:)/ // the empty regular expression ``` The regular expression syntax is defined by [RE2](https://github.com/google/re2/wiki/Syntax). 
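As a small, hedged illustration of the syntax above (the values are arbitrary):

```js
// Regular expression literals are typically used with the =~ and !~ match operators
"http://localhost:8086" =~ /^http:\/\/localhost/ // true
"日本語" =~ /^日本語(ZZ)?$/ // true
"abc" !~ /x/ // true
```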
diff --git a/content/flux/v0.x/spec/options.md b/content/flux/v0.x/spec/options.md index b5075d4b5..71ca5f011 100644 --- a/content/flux/v0.x/spec/options.md +++ b/content/flux/v0.x/spec/options.md @@ -23,6 +23,10 @@ Below is a list of built-in options currently implemented in the Flux language: - [task](#task) - [location](#location) +Options are not closed, meaning new options may be defined and consumed within packages and scripts. +Changing the value of an option for a package changes the value for all references +to that option from any other package. + #### now The `now` option is a function that returns a time value used as a proxy for the current system time. @@ -51,10 +55,10 @@ The default value is [`timezone.utc`](/flux/v0.x/stdlib/timezone/#constants). ```js import "timezone" -// Set timezone to be 5 hours west of UTC. +// Set timezone to be 5 hours west of UTC option location = timezone.fixed(offset: -5h) -// Set location to be America/Denver. +// Set location to be America/Denver option location = timezone.location(name: "America/Denver") ``` diff --git a/content/flux/v0.x/spec/statements.md b/content/flux/v0.x/spec/statements.md index 11ca2da12..e5bd99ff2 100644 --- a/content/flux/v0.x/spec/statements.md +++ b/content/flux/v0.x/spec/statements.md @@ -11,12 +11,6 @@ aliases: - /influxdb/cloud/reference/flux/language/statements/ --- -{{% note %}} -This document is a living document and may not represent the current implementation of Flux. -Any section that is not currently implemented is commented with a **[IMPL#XXX]** where -**XXX** is an issue number tracking discussion and progress towards implementation. -{{% /note %}} - A _statement_ controls execution. ```js diff --git a/content/flux/v0.x/spec/system-built-ins.md b/content/flux/v0.x/spec/system-built-ins.md index 2a8c350db..6850bc6c9 100644 --- a/content/flux/v0.x/spec/system-built-ins.md +++ b/content/flux/v0.x/spec/system-built-ins.md @@ -23,10 +23,12 @@ All such values must have a corresponding builtin statement to declare the exist BuiltinStatement = "builtin" identifier ":" TypeExpression . TypeExpression = MonoType ["where" Constraints] . -MonoType = Tvar | BasicType | ArrayType | RecordType | FunctionType . +MonoType = Tvar | BasicType | ArrayType | StreamType | VectorType | RecordType | FunctionType . Tvar = "A" … "Z" . BasicType = "int" | "uint" | "float" | "string" | "bool" | "time" | "duration" | "bytes" | "regexp" . ArrayType = "[" MonoType "]" . +StreamType = "stream" "[" MonoType "]" . +VectorType = "vector" "[" MonoType "]" . RecordType = ( "{" [RecordTypeProperties] "}" ) | ( "{" Tvar "with" RecordTypeProperties "}" ) . FunctionType = "(" [FunctionTypeParameters] ")" "=>" MonoType . @@ -45,7 +47,7 @@ Kinds = identifier { "+" identifier } . ##### Example ```js -builtin filter : (<-tables: [T], fn: (r: T) => bool) => [T] +builtin filter : (<-tables: stream[T], fn: (r: T) => bool) => stream[T] ``` {{< page-nav prev="/flux/v0.x/spec/side-effects/" next="/flux/v0.x/spec/data-model/" >}} diff --git a/content/flux/v0.x/spec/types.md b/content/flux/v0.x/spec/types.md index f1e2efad7..2762a96c1 100644 --- a/content/flux/v0.x/spec/types.md +++ b/content/flux/v0.x/spec/types.md @@ -11,12 +11,6 @@ aliases: - /influxdb/cloud/reference/flux/language/types/ --- -{{% note %}} -This document is a living document and may not represent the current implementation of Flux. 
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
A **type** defines the set of values and operations on those values.
Types are never explicitly declared as part of the syntax except as part of a [builtin statement](/flux/v0.x/spec/system-built-ins/).
Types are always inferred from the usage of the value.
@@ -88,9 +82,7 @@ Flux supports [RFC3339 timestamps](/influxdb/cloud/reference/glossary/#rfc3339-t
### Duration types
A _duration type_ represents a length of time with nanosecond precision.
The duration type name is `duration`.
-The duration type is nullable
-
-Durations can be added to times to produce a new time.
+The duration type is nullable.

##### Examples of duration types
```js
@@ -108,10 +100,6 @@ Durations can be added to times to produce a new time.
3d12h4m25s // 3 days, 12 hours, 4 minutes, and 25 seconds
```

-{{% note %}}
-[IMPL#2026](https://github.com/influxdata/flux/issues/2026) Operator for time arithmetic
-{{% /note %}}
-
### String types
A _string type_ represents a possibly empty sequence of characters.
Strings are immutable and cannot be modified once created.
@@ -122,9 +110,6 @@ The string type is nullable.
An empty string is **not** a _null_ value.
{{% /note %}}

-The length of a string is its size in bytes, not the number of characters,
-since a single character may be multiple bytes.
-
### Bytes types
A _bytes type_ represents a sequence of byte values.
The bytes type name is `bytes`.
@@ -162,14 +147,12 @@ Values must also be of the same type.

### Function types
A _function type_ represents a set of all functions with the same argument and result types.
+Function arguments are always named (there are no positional arguments).
+Therefore, implementing a function type requires that the arguments be named the same.

-### Generator types
-A _generator type_ represents a value that produces an unknown number of other values.
-The generated values may be of any other type, but must all be the same type.
-
-{{% note %}}
-[IMPL#412](https://github.com/influxdata/flux/issues/412) Implement Generators types.
-{{% /note %}}
+### Stream types
+A _stream type_ represents an unbounded collection of values.
+The values must be records and those records may only hold int, uint, float, string, time or bool types.

## Polymorphism
Flux functions can be polymorphic, meaning a function can be applied to arguments of different types.
diff --git a/content/flux/v0.x/stdlib/experimental/array/tobool.md b/content/flux/v0.x/stdlib/experimental/array/tobool.md
new file mode 100644
index 000000000..af83ad80e
--- /dev/null
+++ b/content/flux/v0.x/stdlib/experimental/array/tobool.md
@@ -0,0 +1,76 @@
+---
+title: array.toBool() function
+description: >
+  `array.toBool()` converts all values in an array to booleans.
+menu:
+  flux_0_x_ref:
+    name: array.toBool
+    parent: experimental/array
+    identifier: experimental/array/toBool
+weight: 201
+flux/v0.x/tags: [type-conversions]
+introduced: 0.184.0
+---
+
+
+
+`array.toBool()` converts all values in an array to booleans.
+ +#### Supported array types + +- `[string]` with values `true` or `false` +- `[int]` with values `1` or `0` +- `[uint]` with values `1` or `0` +- `[float]` with values `1.0` or `0.0` + +##### Function type signature + +```js +(<-arr: [A]) => [bool] +``` + +{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}} + +## Parameters + +### arr + +Array of values to convert. Default is the piped-forward array (`<-`). + + + + +## Examples + +### Convert an array of integers to booleans + +```js +import "experimental/array" + +arr = [ + 1, + 1, + 0, + 1, + 0, +] + +array.toBool(arr: arr)// Returns [true, true, false, true, false] + + +``` + diff --git a/content/flux/v0.x/stdlib/experimental/array/toduration.md b/content/flux/v0.x/stdlib/experimental/array/toduration.md new file mode 100644 index 000000000..54ebd4603 --- /dev/null +++ b/content/flux/v0.x/stdlib/experimental/array/toduration.md @@ -0,0 +1,69 @@ +--- +title: array.toDuration() function +description: > + `array.toDuration()` converts all values in an array to durations. +menu: + flux_0_x_ref: + name: array.toDuration + parent: experimental/array + identifier: experimental/array/toDuration +weight: 201 +flux/v0.x/tags: [type-conversions] +introduced: 0.184.0 +--- + + + +`array.toDuration()` converts all values in an array to durations. + +#### Supported array types and behaviors + +- `[int]` (parsed as nanosecond epoch timestamps) +- `[string]` with values that use [duration literal](/flux/v0.x/data-types/basic/duration/#duration-syntax) representation. +- `[uint]` (parsed as nanosecond epoch timestamps) + +##### Function type signature + +```js +(<-arr: [A]) => [duration] +``` + +{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}} + +## Parameters + +### arr + +Array of values to convert. Default is the piped-forward array (`<-`). + + + + +## Examples + +### Convert an array of integers to durations + +```js +import "experimental/array" + +arr = [80000000000, 56000000000, 132000000000] + +array.toDuration(arr: arr)// Returns [1m20s, 56s, 2m12s] + + +``` + diff --git a/content/flux/v0.x/stdlib/experimental/array/tofloat.md b/content/flux/v0.x/stdlib/experimental/array/tofloat.md new file mode 100644 index 000000000..991f1a43b --- /dev/null +++ b/content/flux/v0.x/stdlib/experimental/array/tofloat.md @@ -0,0 +1,86 @@ +--- +title: array.toFloat() function +description: > + `array.toFloat()` converts all values in an array to floats. +menu: + flux_0_x_ref: + name: array.toFloat + parent: experimental/array + identifier: experimental/array/toFloat +weight: 201 +flux/v0.x/tags: [type-conversions] +introduced: 0.184.0 +--- + + + +`array.toFloat()` converts all values in an array to floats. + +#### Supported array types + +- `[string]` (numeric, scientific notation, ±Inf, or NaN) +- `[bool]` +- `[int]` +- `[uint]` + +##### Function type signature + +```js +(<-arr: [A]) => [float] +``` + +{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}} + +## Parameters + +### arr + +Array of values to convert. Default is the piped-forward array (`<-`). 
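+
+Because `arr` defaults to the piped-forward array, the function can also be
+called with the pipe-forward operator. A minimal sketch, assuming the
+illustrative input values shown here (they are not from the reference itself):
+
+```js
+import "experimental/array"
+
+// The piped-forward array is used as the `arr` parameter.
+["1.5", "2.5", "-3.5"] |> array.toFloat()
+
+// Returns [1.5, 2.5, -3.5]
+```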
+
+
+
+
+## Examples
+
+- [Convert an array of integers to floats](#convert-an-array-of-integers-to-floats)
+- [Convert an array of strings to floats](#convert-an-array-of-strings-to-floats)
+
+### Convert an array of integers to floats
+
+```js
+import "experimental/array"
+
+arr = [12, 24, 36, 48]
+
+array.toFloat(arr: arr)// Returns [12.0, 24.0, 36.0, 48.0]
+
+
+```
+
+
+### Convert an array of strings to floats
+
+```js
+import "experimental/array"
+
+arr = ["12", "1.23e+4", "NaN", "24.2"]
+
+array.toFloat(arr: arr)// Returns [12.0, 12300.0, NaN, 24.2]
+
+
+```
+
diff --git a/content/flux/v0.x/stdlib/experimental/array/toint.md b/content/flux/v0.x/stdlib/experimental/array/toint.md
new file mode 100644
index 000000000..c7eb6518b
--- /dev/null
+++ b/content/flux/v0.x/stdlib/experimental/array/toint.md
@@ -0,0 +1,74 @@
+---
+title: array.toInt() function
+description: >
+  `array.toInt()` converts all values in an array to integers.
+menu:
+  flux_0_x_ref:
+    name: array.toInt
+    parent: experimental/array
+    identifier: experimental/array/toInt
+weight: 201
+flux/v0.x/tags: [type-conversions]
+introduced: 0.184.0
+---
+
+
+
+`array.toInt()` converts all values in an array to integers.
+
+#### Supported array types and behaviors
+
+| Array type   | Returned array values                      |
+| :----------- | :----------------------------------------- |
+| `[bool]`     | 1 (true) or 0 (false)                      |
+| `[duration]` | Number of nanoseconds in the duration      |
+| `[float]`    | Value truncated at the decimal             |
+| `[string]`   | Integer equivalent of the numeric string   |
+| `[time]`     | Equivalent nanosecond epoch timestamp      |
+| `[uint]`     | Integer equivalent of the unsigned integer |
+
+##### Function type signature
+
+```js
+(<-arr: [A]) => [int]
+```
+
+{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}}
+
+## Parameters
+
+### arr
+
+Array of values to convert. Default is the piped-forward array (`<-`).
+
+
+
+
+## Examples
+
+### Convert an array of floats to integers
+
+```js
+import "experimental/array"
+
+arr = [12.1, 24.2, 36.3, 48.4]
+
+array.toInt(arr: arr)// Returns [12, 24, 36, 48]
+
+
+```
+
diff --git a/content/flux/v0.x/stdlib/experimental/array/tostring.md b/content/flux/v0.x/stdlib/experimental/array/tostring.md
new file mode 100644
index 000000000..6e375eab0
--- /dev/null
+++ b/content/flux/v0.x/stdlib/experimental/array/tostring.md
@@ -0,0 +1,72 @@
+---
+title: array.toString() function
+description: >
+  `array.toString()` converts all values in an array to strings.
+menu:
+  flux_0_x_ref:
+    name: array.toString
+    parent: experimental/array
+    identifier: experimental/array/toString
+weight: 201
+flux/v0.x/tags: [type-conversions]
+introduced: 0.184.0
+---
+
+
+
+`array.toString()` converts all values in an array to strings.
+
+#### Supported array types
+
+- `[bool]`
+- `[duration]`
+- `[float]`
+- `[int]`
+- `[time]`
+- `[uint]`
+
+##### Function type signature
+
+```js
+(<-arr: [A]) => [string]
+```
+
+{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}}
+
+## Parameters
+
+### arr
+
+Array of values to convert. Default is the piped-forward array (`<-`).
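+
+As a sketch of the `[duration]` support listed above, with illustrative input
+values (the exact output assumes duration-literal formatting):
+
+```js
+import "experimental/array"
+
+arr = [1m30s, 2h, 500ms]
+
+array.toString(arr: arr)
+
+// Expected to return ["1m30s", "2h", "500ms"]
+```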
+
+
+
+
+## Examples
+
+### Convert an array of floats to strings
+
+```js
+import "experimental/array"
+
+arr = [12.0, 1.23, NaN, 24.2]
+
+array.toString(arr: arr)// Returns ["12.0", "1.23", "NaN", "24.2"]
+
+
+```
+
diff --git a/content/flux/v0.x/stdlib/experimental/array/totime.md b/content/flux/v0.x/stdlib/experimental/array/totime.md
new file mode 100644
index 000000000..80c403aeb
--- /dev/null
+++ b/content/flux/v0.x/stdlib/experimental/array/totime.md
@@ -0,0 +1,70 @@
+---
+title: array.toTime() function
+description: >
+  `array.toTime()` converts all values in an array to times.
+menu:
+  flux_0_x_ref:
+    name: array.toTime
+    parent: experimental/array
+    identifier: experimental/array/toTime
+weight: 201
+flux/v0.x/tags: [type-conversions]
+introduced: 0.184.0
+---
+
+
+
+`array.toTime()` converts all values in an array to times.
+
+#### Supported array types
+
+- `[int]` (parsed as nanosecond epoch timestamps)
+- `[string]` with values that use [time literal](/flux/v0.x/data-types/basic/time/#time-syntax)
+  representation (RFC3339 timestamps).
+- `[uint]` (parsed as nanosecond epoch timestamps)
+
+##### Function type signature
+
+```js
+(<-arr: [A]) => [time]
+```
+
+{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}}
+
+## Parameters
+
+### arr
+
+Array of values to convert. Default is the piped-forward array (`<-`).
+
+
+
+
+## Examples
+
+### Convert an array of integers to time values
+
+```js
+import "experimental/array"
+
+arr = [1640995200000000000, 1643673600000000000, 1646092800000000000]
+
+array.toTime(arr: arr)// Returns [2022-01-01T00:00:00Z, 2022-02-01T00:00:00Z, 2022-03-01T00:00:00Z]
+
+
+```
+
diff --git a/content/flux/v0.x/stdlib/experimental/array/touint.md b/content/flux/v0.x/stdlib/experimental/array/touint.md
new file mode 100644
index 000000000..ea37558ad
--- /dev/null
+++ b/content/flux/v0.x/stdlib/experimental/array/touint.md
@@ -0,0 +1,74 @@
+---
+title: array.toUInt() function
+description: >
+  `array.toUInt()` converts all values in an array to unsigned integers.
+menu:
+  flux_0_x_ref:
+    name: array.toUInt
+    parent: experimental/array
+    identifier: experimental/array/toUInt
+weight: 201
+flux/v0.x/tags: [type-conversions]
+introduced: 0.184.0
+---
+
+
+
+`array.toUInt()` converts all values in an array to unsigned integers.
+
+#### Supported array types and behaviors
+
+| Array type   | Returned array values                      |
+| :----------- | :----------------------------------------- |
+| `[bool]`     | 1 (true) or 0 (false)                      |
+| `[duration]` | Number of nanoseconds in the duration      |
+| `[float]`    | Value truncated at the decimal             |
+| `[int]`      | Unsigned integer equivalent of the integer |
+| `[string]`   | Integer equivalent of the numeric string   |
+| `[time]`     | Equivalent nanosecond epoch timestamp      |
+
+##### Function type signature
+
+```js
+(<-arr: [A]) => [uint]
+```
+
+{{% caption %}}For more information, see [Function type signatures](/flux/v0.x/function-type-signatures/).{{% /caption %}}
+
+## Parameters
+
+### arr
+
+Array of values to convert. Default is the piped-forward array (`<-`).
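+
+As a sketch of the `[bool]` behavior in the table above, with illustrative
+input values:
+
+```js
+import "experimental/array"
+
+arr = [true, false, true]
+
+array.toUInt(arr: arr)
+
+// Returns [1, 0, 1]
+```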
+
+
+
+
+## Examples
+
+### Convert an array of floats to unsigned integers
+
+```js
+import "experimental/array"
+
+arr = [-12.1, 24.2, -36.3, 48.4]
+
+array.toUInt(arr: arr)// Returns [18446744073709551604, 24, 18446744073709551580, 48]
+
+
+```
+
diff --git a/content/flux/v0.x/stdlib/experimental/geo/astracks.md b/content/flux/v0.x/stdlib/experimental/geo/astracks.md
index 4b8fe48e6..2bbdbb9ab 100644
--- a/content/flux/v0.x/stdlib/experimental/geo/astracks.md
+++ b/content/flux/v0.x/stdlib/experimental/geo/astracks.md
@@ -20,7 +20,7 @@ documentation is generated.
To make updates to this documentation, update the function comments above the
function definition in the Flux source code:

-https://github.com/influxdata/flux/blob/master/stdlib/experimental/geo/geo.flux#L982-L985
+https://github.com/influxdata/flux/blob/master/stdlib/experimental/geo/geo.flux#L1008-L1011

Contributing to Flux: https://github.com/influxdata/flux#contributing
Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md
@@ -52,7 +52,7 @@ Default is `["id","tid"]`.

Columns to order results by. Default is `["_time"]`.

-
+Sort precedence is determined by list order (left to right).

### tables
@@ -63,6 +63,9 @@ Input data. Default is piped-forward data (`<-`).

## Examples

+- [Group geotemporal data into tracks](#group-geotemporal-data-into-tracks)
+- [Group geotemporal data into tracks and sort by specified columns](#group-geotemporal-data-into-tracks-and-sort-by-specified-columns)
+
### Group geotemporal data into tracks

```js
@@ -104,3 +107,45 @@ data

{{% /expand %}}
{{< /expand-wrapper >}}
+
+### Group geotemporal data into tracks and sort by specified columns
+
+```js
+import "experimental/geo"
+
+data
+    |> geo.asTracks(orderBy: ["lat", "lon"])
+
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example input and output" %}}
+
+#### Input data
+
+| _time                | id    | lat      | lon      |
+| -------------------- | ----- | -------- | -------- |
+| 2021-01-01T00:00:00Z | a213b | 14.01433 | -14.5464 |
+| 2021-01-02T01:00:00Z | a213b | 13.9228  | -13.3338 |
+| 2021-01-03T02:00:00Z | a213b | 15.08433 | -12.0433 |
+| 2021-01-01T00:00:00Z | b546c | 14.01433 | 39.7515  |
+| 2021-01-02T01:00:00Z | b546c | 13.9228  | 38.3527  |
+| 2021-01-03T02:00:00Z | b546c | 15.08433 | 36.9978  |
+
+
+#### Output data
+
+| _time                | *id   | lat      | lon      |
+| -------------------- | ----- | -------- | -------- |
+| 2021-01-02T01:00:00Z | a213b | 13.9228  | -13.3338 |
+| 2021-01-01T00:00:00Z | a213b | 14.01433 | -14.5464 |
+| 2021-01-03T02:00:00Z | a213b | 15.08433 | -12.0433 |
+
+| _time                | *id   | lat      | lon     |
+| -------------------- | ----- | -------- | ------- |
+| 2021-01-02T01:00:00Z | b546c | 13.9228  | 38.3527 |
+| 2021-01-01T00:00:00Z | b546c | 14.01433 | 39.7515 |
+| 2021-01-03T02:00:00Z | b546c | 15.08433 | 36.9978 |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/flux/v0.x/stdlib/experimental/http/requests/get.md b/content/flux/v0.x/stdlib/experimental/http/requests/get.md
index 823c7b686..355cdbb29 100644
--- a/content/flux/v0.x/stdlib/experimental/http/requests/get.md
+++ b/content/flux/v0.x/stdlib/experimental/http/requests/get.md
@@ -133,7 +133,7 @@ array.from(rows: [{name: data.name, age: data.age}])

| name      | age  |
| --------- | ---- |
-| nathaniel | 30   |
+| nathaniel | 60   |

{{% /expand %}}
{{< /expand-wrapper >}}
diff --git a/content/flux/v0.x/stdlib/experimental/http/requests/peek.md b/content/flux/v0.x/stdlib/experimental/http/requests/peek.md
index a83d5a0c3..86ac52091 100644
---
a/content/flux/v0.x/stdlib/experimental/http/requests/peek.md +++ b/content/flux/v0.x/stdlib/experimental/http/requests/peek.md @@ -78,22 +78,23 @@ requests.peek(response: requests.get(url: "https://api.agify.io", params: ["name #### Output data -| body | duration | headers | statusCode | -| ----------------------------------------- | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| {"name":"natalie","age":34,"count":20959} | 100000000 | [ - Access-Control-Allow-Headers: Content-Type, X-Genderize-Source, - Access-Control-Allow-Methods: GET, +| body | duration | headers | statusCode | +| ----------------------------------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| {"age":48,"count":25082,"name":"natalie"} | 100000000 | [ + Access-Control-Allow-Credentials: true, Access-Control-Allow-Origin: *, + Access-Control-Expose-Headers: x-rate-limit-limit,x-rate-limit-remaining,x-rate-limit-reset, + Cache-Control: max-age=0, private, must-revalidate, Connection: keep-alive, Content-Length: 41, Content-Type: application/json; charset=utf-8, - Date: Mon, 12 Sep 2022 19:26:11 GMT, - Etag: W/"29-klDahUESBLxHyQ7NiaetCn2CvCI", + Date: Wed, 21 Sep 2022 10:08:38 GMT, Server: nginx/1.16.1, X-Rate-Limit-Limit: 1000, X-Rate-Limit-Remaining: 998, - X-Rate-Reset: 16429 -] | 200 | + X-Rate-Limit-Reset: 49882, + X-Request-Id: FxbYSI-0Y_v6ACsAkbHS +] | 200 | {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/flux/v0.x/stdlib/experimental/polyline/rdp.md b/content/flux/v0.x/stdlib/experimental/polyline/rdp.md index 49d28f01c..5cb32d70b 100644 --- a/content/flux/v0.x/stdlib/experimental/polyline/rdp.md +++ b/content/flux/v0.x/stdlib/experimental/polyline/rdp.md @@ -107,44 +107,44 @@ data | _time | _value | | -------------------- | ------------------- | -| 2022-09-12T19:23:30Z | 10.56555566168836 | -| 2022-09-12T19:23:40Z | -29.76098586714259 | -| 2022-09-12T19:23:50Z | -67.50435038579738 | -| 2022-09-12T19:24:00Z | -16.758669047964453 | -| 2022-09-12T19:24:10Z | -47.25865245658065 | -| 2022-09-12T19:24:20Z | 66.16082461651365 | -| 2022-09-12T19:24:30Z | -0.9179216017921821 | -| 2022-09-12T19:24:40Z | -56.89169240573004 | -| 2022-09-12T19:24:50Z | 11.358605472976624 | -| 2022-09-12T19:25:00Z | 28.71147881415803 | -| 2022-09-12T19:25:10Z | -30.928830759588756 | -| 2022-09-12T19:25:20Z | -22.411848631056067 | -| 2022-09-12T19:25:30Z | 17.05503606764129 | -| 2022-09-12T19:25:40Z | 9.834382683760559 | -| 2022-09-12T19:25:50Z | -12.62058579127679 | -| 2022-09-12T19:26:00Z | -44.44668391211515 | +| 
2022-09-21T10:05:50Z | 10.56555566168836 | +| 2022-09-21T10:06:00Z | -29.76098586714259 | +| 2022-09-21T10:06:10Z | -67.50435038579738 | +| 2022-09-21T10:06:20Z | -16.758669047964453 | +| 2022-09-21T10:06:30Z | -47.25865245658065 | +| 2022-09-21T10:06:40Z | 66.16082461651365 | +| 2022-09-21T10:06:50Z | -0.9179216017921821 | +| 2022-09-21T10:07:00Z | -56.89169240573004 | +| 2022-09-21T10:07:10Z | 11.358605472976624 | +| 2022-09-21T10:07:20Z | 28.71147881415803 | +| 2022-09-21T10:07:30Z | -30.928830759588756 | +| 2022-09-21T10:07:40Z | -22.411848631056067 | +| 2022-09-21T10:07:50Z | 17.05503606764129 | +| 2022-09-21T10:08:00Z | 9.834382683760559 | +| 2022-09-21T10:08:10Z | -12.62058579127679 | +| 2022-09-21T10:08:20Z | -44.44668391211515 | #### Output data | _time | _value | | -------------------- | ------------------- | -| 2022-09-12T19:23:30Z | 10.56555566168836 | -| 2022-09-12T19:23:40Z | -29.76098586714259 | -| 2022-09-12T19:23:50Z | -67.50435038579738 | -| 2022-09-12T19:24:00Z | -16.758669047964453 | -| 2022-09-12T19:24:10Z | -47.25865245658065 | -| 2022-09-12T19:24:20Z | 66.16082461651365 | -| 2022-09-12T19:24:30Z | -0.9179216017921821 | -| 2022-09-12T19:24:40Z | -56.89169240573004 | -| 2022-09-12T19:24:50Z | 11.358605472976624 | -| 2022-09-12T19:25:00Z | 28.71147881415803 | -| 2022-09-12T19:25:10Z | -30.928830759588756 | -| 2022-09-12T19:25:20Z | -22.411848631056067 | -| 2022-09-12T19:25:30Z | 17.05503606764129 | -| 2022-09-12T19:25:40Z | 9.834382683760559 | -| 2022-09-12T19:25:50Z | -12.62058579127679 | -| 2022-09-12T19:26:00Z | -44.44668391211515 | +| 2022-09-21T10:05:50Z | 10.56555566168836 | +| 2022-09-21T10:06:00Z | -29.76098586714259 | +| 2022-09-21T10:06:10Z | -67.50435038579738 | +| 2022-09-21T10:06:20Z | -16.758669047964453 | +| 2022-09-21T10:06:30Z | -47.25865245658065 | +| 2022-09-21T10:06:40Z | 66.16082461651365 | +| 2022-09-21T10:06:50Z | -0.9179216017921821 | +| 2022-09-21T10:07:00Z | -56.89169240573004 | +| 2022-09-21T10:07:10Z | 11.358605472976624 | +| 2022-09-21T10:07:20Z | 28.71147881415803 | +| 2022-09-21T10:07:30Z | -30.928830759588756 | +| 2022-09-21T10:07:40Z | -22.411848631056067 | +| 2022-09-21T10:07:50Z | 17.05503606764129 | +| 2022-09-21T10:08:00Z | 9.834382683760559 | +| 2022-09-21T10:08:10Z | -12.62058579127679 | +| 2022-09-21T10:08:20Z | -44.44668391211515 | {{% /expand %}} {{< /expand-wrapper >}} @@ -166,43 +166,43 @@ data | _time | _value | | -------------------- | ------------------- | -| 2022-09-12T19:23:30Z | 10.56555566168836 | -| 2022-09-12T19:23:40Z | -29.76098586714259 | -| 2022-09-12T19:23:50Z | -67.50435038579738 | -| 2022-09-12T19:24:00Z | -16.758669047964453 | -| 2022-09-12T19:24:10Z | -47.25865245658065 | -| 2022-09-12T19:24:20Z | 66.16082461651365 | -| 2022-09-12T19:24:30Z | -0.9179216017921821 | -| 2022-09-12T19:24:40Z | -56.89169240573004 | -| 2022-09-12T19:24:50Z | 11.358605472976624 | -| 2022-09-12T19:25:00Z | 28.71147881415803 | -| 2022-09-12T19:25:10Z | -30.928830759588756 | -| 2022-09-12T19:25:20Z | -22.411848631056067 | -| 2022-09-12T19:25:30Z | 17.05503606764129 | -| 2022-09-12T19:25:40Z | 9.834382683760559 | -| 2022-09-12T19:25:50Z | -12.62058579127679 | -| 2022-09-12T19:26:00Z | -44.44668391211515 | +| 2022-09-21T10:05:50Z | 10.56555566168836 | +| 2022-09-21T10:06:00Z | -29.76098586714259 | +| 2022-09-21T10:06:10Z | -67.50435038579738 | +| 2022-09-21T10:06:20Z | -16.758669047964453 | +| 2022-09-21T10:06:30Z | -47.25865245658065 | +| 2022-09-21T10:06:40Z | 66.16082461651365 | +| 2022-09-21T10:06:50Z | -0.9179216017921821 | +| 
2022-09-21T10:07:00Z | -56.89169240573004 | +| 2022-09-21T10:07:10Z | 11.358605472976624 | +| 2022-09-21T10:07:20Z | 28.71147881415803 | +| 2022-09-21T10:07:30Z | -30.928830759588756 | +| 2022-09-21T10:07:40Z | -22.411848631056067 | +| 2022-09-21T10:07:50Z | 17.05503606764129 | +| 2022-09-21T10:08:00Z | 9.834382683760559 | +| 2022-09-21T10:08:10Z | -12.62058579127679 | +| 2022-09-21T10:08:20Z | -44.44668391211515 | #### Output data | _time | _value | | -------------------- | ------------------- | -| 2022-09-12T19:23:30Z | 10.56555566168836 | -| 2022-09-12T19:23:50Z | -67.50435038579738 | -| 2022-09-12T19:24:00Z | -16.758669047964453 | -| 2022-09-12T19:24:10Z | -47.25865245658065 | -| 2022-09-12T19:24:20Z | 66.16082461651365 | -| 2022-09-12T19:24:30Z | -0.9179216017921821 | -| 2022-09-12T19:24:40Z | -56.89169240573004 | -| 2022-09-12T19:24:50Z | 11.358605472976624 | -| 2022-09-12T19:25:00Z | 28.71147881415803 | -| 2022-09-12T19:25:10Z | -30.928830759588756 | -| 2022-09-12T19:25:20Z | -22.411848631056067 | -| 2022-09-12T19:25:30Z | 17.05503606764129 | -| 2022-09-12T19:25:40Z | 9.834382683760559 | -| 2022-09-12T19:25:50Z | -12.62058579127679 | -| 2022-09-12T19:26:00Z | -44.44668391211515 | +| 2022-09-21T10:05:50Z | 10.56555566168836 | +| 2022-09-21T10:06:10Z | -67.50435038579738 | +| 2022-09-21T10:06:20Z | -16.758669047964453 | +| 2022-09-21T10:06:30Z | -47.25865245658065 | +| 2022-09-21T10:06:40Z | 66.16082461651365 | +| 2022-09-21T10:06:50Z | -0.9179216017921821 | +| 2022-09-21T10:07:00Z | -56.89169240573004 | +| 2022-09-21T10:07:10Z | 11.358605472976624 | +| 2022-09-21T10:07:20Z | 28.71147881415803 | +| 2022-09-21T10:07:30Z | -30.928830759588756 | +| 2022-09-21T10:07:40Z | -22.411848631056067 | +| 2022-09-21T10:07:50Z | 17.05503606764129 | +| 2022-09-21T10:08:00Z | 9.834382683760559 | +| 2022-09-21T10:08:10Z | -12.62058579127679 | +| 2022-09-21T10:08:20Z | -44.44668391211515 | {{% /expand %}} {{< /expand-wrapper >}} @@ -224,42 +224,42 @@ data | _time | _value | | -------------------- | ------------------- | -| 2022-09-12T19:23:30Z | 10.56555566168836 | -| 2022-09-12T19:23:40Z | -29.76098586714259 | -| 2022-09-12T19:23:50Z | -67.50435038579738 | -| 2022-09-12T19:24:00Z | -16.758669047964453 | -| 2022-09-12T19:24:10Z | -47.25865245658065 | -| 2022-09-12T19:24:20Z | 66.16082461651365 | -| 2022-09-12T19:24:30Z | -0.9179216017921821 | -| 2022-09-12T19:24:40Z | -56.89169240573004 | -| 2022-09-12T19:24:50Z | 11.358605472976624 | -| 2022-09-12T19:25:00Z | 28.71147881415803 | -| 2022-09-12T19:25:10Z | -30.928830759588756 | -| 2022-09-12T19:25:20Z | -22.411848631056067 | -| 2022-09-12T19:25:30Z | 17.05503606764129 | -| 2022-09-12T19:25:40Z | 9.834382683760559 | -| 2022-09-12T19:25:50Z | -12.62058579127679 | -| 2022-09-12T19:26:00Z | -44.44668391211515 | +| 2022-09-21T10:05:50Z | 10.56555566168836 | +| 2022-09-21T10:06:00Z | -29.76098586714259 | +| 2022-09-21T10:06:10Z | -67.50435038579738 | +| 2022-09-21T10:06:20Z | -16.758669047964453 | +| 2022-09-21T10:06:30Z | -47.25865245658065 | +| 2022-09-21T10:06:40Z | 66.16082461651365 | +| 2022-09-21T10:06:50Z | -0.9179216017921821 | +| 2022-09-21T10:07:00Z | -56.89169240573004 | +| 2022-09-21T10:07:10Z | 11.358605472976624 | +| 2022-09-21T10:07:20Z | 28.71147881415803 | +| 2022-09-21T10:07:30Z | -30.928830759588756 | +| 2022-09-21T10:07:40Z | -22.411848631056067 | +| 2022-09-21T10:07:50Z | 17.05503606764129 | +| 2022-09-21T10:08:00Z | 9.834382683760559 | +| 2022-09-21T10:08:10Z | -12.62058579127679 | +| 2022-09-21T10:08:20Z | -44.44668391211515 | #### 
Output data | _time | _value | | -------------------- | ------------------- | -| 2022-09-12T19:23:30Z | 10.56555566168836 | -| 2022-09-12T19:23:50Z | -67.50435038579738 | -| 2022-09-12T19:24:00Z | -16.758669047964453 | -| 2022-09-12T19:24:10Z | -47.25865245658065 | -| 2022-09-12T19:24:20Z | 66.16082461651365 | -| 2022-09-12T19:24:30Z | -0.9179216017921821 | -| 2022-09-12T19:24:40Z | -56.89169240573004 | -| 2022-09-12T19:24:50Z | 11.358605472976624 | -| 2022-09-12T19:25:00Z | 28.71147881415803 | -| 2022-09-12T19:25:10Z | -30.928830759588756 | -| 2022-09-12T19:25:20Z | -22.411848631056067 | -| 2022-09-12T19:25:30Z | 17.05503606764129 | -| 2022-09-12T19:25:40Z | 9.834382683760559 | -| 2022-09-12T19:26:00Z | -44.44668391211515 | +| 2022-09-21T10:05:50Z | 10.56555566168836 | +| 2022-09-21T10:06:10Z | -67.50435038579738 | +| 2022-09-21T10:06:20Z | -16.758669047964453 | +| 2022-09-21T10:06:30Z | -47.25865245658065 | +| 2022-09-21T10:06:40Z | 66.16082461651365 | +| 2022-09-21T10:06:50Z | -0.9179216017921821 | +| 2022-09-21T10:07:00Z | -56.89169240573004 | +| 2022-09-21T10:07:10Z | 11.358605472976624 | +| 2022-09-21T10:07:20Z | 28.71147881415803 | +| 2022-09-21T10:07:30Z | -30.928830759588756 | +| 2022-09-21T10:07:40Z | -22.411848631056067 | +| 2022-09-21T10:07:50Z | 17.05503606764129 | +| 2022-09-21T10:08:00Z | 9.834382683760559 | +| 2022-09-21T10:08:20Z | -44.44668391211515 | {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/flux/v0.x/stdlib/http/requests/get.md b/content/flux/v0.x/stdlib/http/requests/get.md index 7ae2e9c21..f5f9c6f1c 100644 --- a/content/flux/v0.x/stdlib/http/requests/get.md +++ b/content/flux/v0.x/stdlib/http/requests/get.md @@ -130,7 +130,7 @@ array.from(rows: [{name: data.name, age: data.age}]) | name | age | | --------- | ---- | -| nathaniel | 30 | +| nathaniel | 60 | {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/flux/v0.x/stdlib/http/requests/peek.md b/content/flux/v0.x/stdlib/http/requests/peek.md index b6f01879c..1c4763a0d 100644 --- a/content/flux/v0.x/stdlib/http/requests/peek.md +++ b/content/flux/v0.x/stdlib/http/requests/peek.md @@ -73,22 +73,23 @@ requests.peek(response: requests.get(url: "https://api.agify.io", params: ["name #### Output data -| body | duration | headers | statusCode | -| ----------------------------------------- | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| {"name":"natalie","age":34,"count":20959} | 100000000 | [ - Access-Control-Allow-Headers: Content-Type, X-Genderize-Source, - Access-Control-Allow-Methods: GET, +| body | duration | headers | statusCode | +| ----------------------------------------- | --------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| {"age":48,"count":25082,"name":"natalie"} | 100000000 | [ + Access-Control-Allow-Credentials: true, Access-Control-Allow-Origin: *, + Access-Control-Expose-Headers: x-rate-limit-limit,x-rate-limit-remaining,x-rate-limit-reset, + Cache-Control: max-age=0, private, must-revalidate, Connection: keep-alive, Content-Length: 41, Content-Type: application/json; charset=utf-8, - Date: Mon, 12 Sep 2022 19:26:13 GMT, - Etag: W/"29-klDahUESBLxHyQ7NiaetCn2CvCI", + Date: Wed, 21 Sep 2022 10:08:39 GMT, Server: nginx/1.16.1, X-Rate-Limit-Limit: 1000, X-Rate-Limit-Remaining: 996, - X-Rate-Reset: 16427 -] | 200 | + X-Rate-Limit-Reset: 49881, + X-Request-Id: FxbYSObVA-veXYEAkbRi +] | 200 | {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/flux/v0.x/stdlib/http/requests/post.md b/content/flux/v0.x/stdlib/http/requests/post.md index b3a327e8b..548d71175 100644 --- a/content/flux/v0.x/stdlib/http/requests/post.md +++ b/content/flux/v0.x/stdlib/http/requests/post.md @@ -189,12 +189,12 @@ requests.peek(response: response) Cache-Control: max-age=604800, Content-Length: 1256, Content-Type: text/html; charset=UTF-8, - Date: Mon, 12 Sep 2022 19:26:13 GMT, + Date: Wed, 21 Sep 2022 10:08:40 GMT, Etag: "3147526947", - Expires: Mon, 19 Sep 2022 19:26:13 GMT, + Expires: Wed, 28 Sep 2022 10:08:40 GMT, Last-Modified: Thu, 17 Oct 2019 07:18:26 GMT, - Server: EOS (vny/0451) -] | 97664868 | + Server: EOS (vny/0453) +] | 147139602 | {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/flux/v0.x/stdlib/universe/aggregatewindow.md b/content/flux/v0.x/stdlib/universe/aggregatewindow.md index ff0ce284a..1f304199c 100644 --- a/content/flux/v0.x/stdlib/universe/aggregatewindow.md +++ b/content/flux/v0.x/stdlib/universe/aggregatewindow.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3852-L3875 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3866-L3889 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/bool.md b/content/flux/v0.x/stdlib/universe/bool.md index 6c1de66ca..07a734248 100644 --- a/content/flux/v0.x/stdlib/universe/bool.md +++ b/content/flux/v0.x/stdlib/universe/bool.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3116-L3116 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3130-L3130 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/bottom.md b/content/flux/v0.x/stdlib/universe/bottom.md index 4f5a2c9dd..87be9abf4 100644 --- a/content/flux/v0.x/stdlib/universe/bottom.md +++ b/content/flux/v0.x/stdlib/universe/bottom.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4122-L4124 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4136-L4138 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/bytes.md b/content/flux/v0.x/stdlib/universe/bytes.md index fd9602920..e7914da57 100644 --- a/content/flux/v0.x/stdlib/universe/bytes.md +++ b/content/flux/v0.x/stdlib/universe/bytes.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3134-L3134 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3148-L3148 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/contains.md b/content/flux/v0.x/stdlib/universe/contains.md index a0dab9055..476601d83 100644 --- a/content/flux/v0.x/stdlib/universe/contains.md +++ b/content/flux/v0.x/stdlib/universe/contains.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3539-L3539 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3553-L3553 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/cov.md b/content/flux/v0.x/stdlib/universe/cov.md index a3a786c41..410e98eb1 100644 --- a/content/flux/v0.x/stdlib/universe/cov.md +++ b/content/flux/v0.x/stdlib/universe/cov.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3677-L3679 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3691-L3693 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/display.md b/content/flux/v0.x/stdlib/universe/display.md index c45374ada..03570ee11 100644 --- a/content/flux/v0.x/stdlib/universe/display.md +++ b/content/flux/v0.x/stdlib/universe/display.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3512-L3512 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3526-L3526 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/doubleema.md b/content/flux/v0.x/stdlib/universe/doubleema.md index 1457981db..4776a95ea 100644 --- a/content/flux/v0.x/stdlib/universe/doubleema.md +++ b/content/flux/v0.x/stdlib/universe/doubleema.md @@ -23,7 +23,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4439-L4445 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4453-L4459 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/duration.md b/content/flux/v0.x/stdlib/universe/duration.md index f8ccc3fb5..8f1bdacc8 100644 --- a/content/flux/v0.x/stdlib/universe/duration.md +++ b/content/flux/v0.x/stdlib/universe/duration.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3184-L3184 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3198-L3198 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/filter.md b/content/flux/v0.x/stdlib/universe/filter.md index 180197d78..2fa3f9a2c 100644 --- a/content/flux/v0.x/stdlib/universe/filter.md +++ b/content/flux/v0.x/stdlib/universe/filter.md @@ -117,12 +117,12 @@ sampledata.int() | 2021-01-01T00:00:40Z | 13 | t2 | | 2021-01-01T00:00:50Z | 1 | t2 | -| _time | _value | *tag | -| ------ | ------- | ---- | - #### Output data +| _time | _value | *tag | +| ------ | ------- | ---- | + | _time | _value | *tag | | -------------------- | ------- | ---- | | 2021-01-01T00:00:00Z | 19 | t2 | diff --git a/content/flux/v0.x/stdlib/universe/findcolumn.md b/content/flux/v0.x/stdlib/universe/findcolumn.md index 62fbbbe6b..ca84c51f1 100644 --- a/content/flux/v0.x/stdlib/universe/findcolumn.md +++ b/content/flux/v0.x/stdlib/universe/findcolumn.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3031-L3034 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3045-L3048 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/findrecord.md b/content/flux/v0.x/stdlib/universe/findrecord.md index b6af58cb7..106c785c9 100644 --- a/content/flux/v0.x/stdlib/universe/findrecord.md +++ b/content/flux/v0.x/stdlib/universe/findrecord.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3068-L3071 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3082-L3085 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/float.md b/content/flux/v0.x/stdlib/universe/float.md index d7503fc3b..1504ad54e 100644 --- a/content/flux/v0.x/stdlib/universe/float.md +++ b/content/flux/v0.x/stdlib/universe/float.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3228-L3228 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3242-L3242 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/getcolumn.md b/content/flux/v0.x/stdlib/universe/getcolumn.md index 329f0c719..02fd73beb 100644 --- a/content/flux/v0.x/stdlib/universe/getcolumn.md +++ b/content/flux/v0.x/stdlib/universe/getcolumn.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2970-L2970 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2984-L2984 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/getrecord.md b/content/flux/v0.x/stdlib/universe/getrecord.md index d5f703f48..aef1c3780 100644 --- a/content/flux/v0.x/stdlib/universe/getrecord.md +++ b/content/flux/v0.x/stdlib/universe/getrecord.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2997-L2997 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3011-L3011 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/highestaverage.md b/content/flux/v0.x/stdlib/universe/highestaverage.md index bd94f0127..0a94c81c4 100644 --- a/content/flux/v0.x/stdlib/universe/highestaverage.md +++ b/content/flux/v0.x/stdlib/universe/highestaverage.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4207-L4215 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4221-L4229 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/highestcurrent.md b/content/flux/v0.x/stdlib/universe/highestcurrent.md index 3f6a49ba5..a6eac3d56 100644 --- a/content/flux/v0.x/stdlib/universe/highestcurrent.md +++ b/content/flux/v0.x/stdlib/universe/highestcurrent.md @@ -22,7 +22,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4242-L4250 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4256-L4264 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/highestmax.md b/content/flux/v0.x/stdlib/universe/highestmax.md index c6f10e262..bf61db2f5 100644 --- a/content/flux/v0.x/stdlib/universe/highestmax.md +++ b/content/flux/v0.x/stdlib/universe/highestmax.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4170-L4180 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4184-L4194 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/increase.md b/content/flux/v0.x/stdlib/universe/increase.md index 0719d618d..c1220a986 100644 --- a/content/flux/v0.x/stdlib/universe/increase.md +++ b/content/flux/v0.x/stdlib/universe/increase.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3902-L3905 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3916-L3919 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/int.md b/content/flux/v0.x/stdlib/universe/int.md index 4e5c00eb4..68abac4d3 100644 --- a/content/flux/v0.x/stdlib/universe/int.md +++ b/content/flux/v0.x/stdlib/universe/int.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3288-L3288 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3302-L3302 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/kaufmanser.md b/content/flux/v0.x/stdlib/universe/kaufmanser.md index a114be5d2..361540918 100644 --- a/content/flux/v0.x/stdlib/universe/kaufmanser.md +++ b/content/flux/v0.x/stdlib/universe/kaufmanser.md @@ -22,7 +22,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4472-L4475 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4486-L4489 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/length.md b/content/flux/v0.x/stdlib/universe/length.md index 23f11b741..361c92526 100644 --- a/content/flux/v0.x/stdlib/universe/length.md +++ b/content/flux/v0.x/stdlib/universe/length.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3562-L3562 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3576-L3576 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/linearbins.md b/content/flux/v0.x/stdlib/universe/linearbins.md index 9b7843633..9215a4df1 100644 --- a/content/flux/v0.x/stdlib/universe/linearbins.md +++ b/content/flux/v0.x/stdlib/universe/linearbins.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3585-L3585 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3599-L3599 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/logarithmicbins.md b/content/flux/v0.x/stdlib/universe/logarithmicbins.md index 19cde4a50..8880d2894 100644 --- a/content/flux/v0.x/stdlib/universe/logarithmicbins.md +++ b/content/flux/v0.x/stdlib/universe/logarithmicbins.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3608-L3608 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3622-L3622 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/lowestaverage.md b/content/flux/v0.x/stdlib/universe/lowestaverage.md index c9c13ec14..02e99e380 100644 --- a/content/flux/v0.x/stdlib/universe/lowestaverage.md +++ b/content/flux/v0.x/stdlib/universe/lowestaverage.md @@ -22,7 +22,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4314-L4322 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4328-L4336 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/lowestcurrent.md b/content/flux/v0.x/stdlib/universe/lowestcurrent.md index 363ba670b..cc5bc49c2 100644 --- a/content/flux/v0.x/stdlib/universe/lowestcurrent.md +++ b/content/flux/v0.x/stdlib/universe/lowestcurrent.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4349-L4357 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4363-L4371 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/lowestmin.md b/content/flux/v0.x/stdlib/universe/lowestmin.md index f5ce1de3b..f8a2380b2 100644 --- a/content/flux/v0.x/stdlib/universe/lowestmin.md +++ b/content/flux/v0.x/stdlib/universe/lowestmin.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4277-L4287 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4291-L4301 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/max.md b/content/flux/v0.x/stdlib/universe/max.md index fb56c5744..f93a23bab 100644 --- a/content/flux/v0.x/stdlib/universe/max.md +++ b/content/flux/v0.x/stdlib/universe/max.md @@ -59,7 +59,7 @@ Input data. Default is piped-forward data (`<-`). ## Examples -### Return the row with the maximum value +### Return the row with the maximum value from each input table ```js import "sampledata" diff --git a/content/flux/v0.x/stdlib/universe/median.md b/content/flux/v0.x/stdlib/universe/median.md index db54e79a2..8d8ab29e4 100644 --- a/content/flux/v0.x/stdlib/universe/median.md +++ b/content/flux/v0.x/stdlib/universe/median.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3965-L3967 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3979-L3981 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/pearsonr.md b/content/flux/v0.x/stdlib/universe/pearsonr.md index 0e21a6a84..b6de33b6b 100644 --- a/content/flux/v0.x/stdlib/universe/pearsonr.md +++ b/content/flux/v0.x/stdlib/universe/pearsonr.md @@ -22,7 +22,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3708-L3708 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3722-L3722 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/range.md b/content/flux/v0.x/stdlib/universe/range.md index 48ec34276..a8dc08224 100644 --- a/content/flux/v0.x/stdlib/universe/range.md +++ b/content/flux/v0.x/stdlib/universe/range.md @@ -61,7 +61,7 @@ Durations are relative to `now()`. Latest time to include in results. Default is `now()`. -Results _exclude_ rows with `_time` values that match the specified start time. +Results _exclude_ rows with `_time` values that match the specified stop time. Use a relative duration, absolute time, or integer (Unix timestamp in seconds). For example, `-1h`, `2019-08-28T22:00:00Z`, or `1567029600`. Durations are relative to `now()`. diff --git a/content/flux/v0.x/stdlib/universe/rename.md b/content/flux/v0.x/stdlib/universe/rename.md index 4e01f5754..05c804e4c 100644 --- a/content/flux/v0.x/stdlib/universe/rename.md +++ b/content/flux/v0.x/stdlib/universe/rename.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2264-L2268 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2278-L2282 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md @@ -30,7 +30,7 @@ Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md `rename()` renames columns in a table. -If a column in group key is renamed, the column name in the group key is updated. +If a column in the group key is renamed, the column name in the group key is updated. ##### Function type signature @@ -64,10 +64,11 @@ Input data. Default is piped-forward data (`<-`). 
## Examples
 
-- [Map column names to new column names](#map-column-names-to-new-column-names)
+- [Explicitly map column names to new column names](#explicitly-map-column-names-to-new-column-names)
 - [Rename columns using a function](#rename-columns-using-a-function)
+- [Conditionally rename columns using a function](#conditionally-rename-columns-using-a-function)
 
-### Map column names to new column names
+### Explicitly map column names to new column names
 
 ```js
 import "sampledata"
@@ -180,3 +181,66 @@ sampledata.int()
 
 {{% /expand %}}
 {{< /expand-wrapper >}}
+
+### Conditionally rename columns using a function
+
+```js
+import "sampledata"
+
+sampledata.int()
+    |> rename(
+        fn: (column) => {
+            _newColumnName = if column =~ /^_/ then "${column} (Reserved)" else column
+
+            return _newColumnName
+        },
+    )
+
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example input and output" %}}
+
+#### Input data
+
+| _time | _value | *tag |
+| -------------------- | ------- | ---- |
+| 2021-01-01T00:00:00Z | -2 | t1 |
+| 2021-01-01T00:00:10Z | 10 | t1 |
+| 2021-01-01T00:00:20Z | 7 | t1 |
+| 2021-01-01T00:00:30Z | 17 | t1 |
+| 2021-01-01T00:00:40Z | 15 | t1 |
+| 2021-01-01T00:00:50Z | 4 | t1 |
+
+| _time | _value | *tag |
+| -------------------- | ------- | ---- |
+| 2021-01-01T00:00:00Z | 19 | t2 |
+| 2021-01-01T00:00:10Z | 4 | t2 |
+| 2021-01-01T00:00:20Z | -3 | t2 |
+| 2021-01-01T00:00:30Z | 19 | t2 |
+| 2021-01-01T00:00:40Z | 13 | t2 |
+| 2021-01-01T00:00:50Z | 1 | t2 |
+
+
+#### Output data
+
+| _time (Reserved) | _value (Reserved) | *tag |
+| -------------------- | ------------------ | ---- |
+| 2021-01-01T00:00:00Z | -2 | t1 |
+| 2021-01-01T00:00:10Z | 10 | t1 |
+| 2021-01-01T00:00:20Z | 7 | t1 |
+| 2021-01-01T00:00:30Z | 17 | t1 |
+| 2021-01-01T00:00:40Z | 15 | t1 |
+| 2021-01-01T00:00:50Z | 4 | t1 |
+
+| _time (Reserved) | _value (Reserved) | *tag |
+| -------------------- | ------------------ | ---- |
+| 2021-01-01T00:00:00Z | 19 | t2 |
+| 2021-01-01T00:00:10Z | 4 | t2 |
+| 2021-01-01T00:00:20Z | -3 | t2 |
+| 2021-01-01T00:00:30Z | 19 | t2 |
+| 2021-01-01T00:00:40Z | 13 | t2 |
+| 2021-01-01T00:00:50Z | 1 | t2 |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/flux/v0.x/stdlib/universe/sample.md b/content/flux/v0.x/stdlib/universe/sample.md
index c18301427..48005cbfd 100644
--- a/content/flux/v0.x/stdlib/universe/sample.md
+++ b/content/flux/v0.x/stdlib/universe/sample.md
@@ -21,7 +21,7 @@ documentation is generated.
 
To make updates to this documentation, update the function comments above the
function definition in the Flux source code:
 
-https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2298-L2300
+https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2312-L2314
 
Contributing to Flux: https://github.com/influxdata/flux#contributing
Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md
diff --git a/content/flux/v0.x/stdlib/universe/set.md b/content/flux/v0.x/stdlib/universe/set.md
index 9a5f92bc3..6b99f9766 100644
--- a/content/flux/v0.x/stdlib/universe/set.md
+++ b/content/flux/v0.x/stdlib/universe/set.md
@@ -21,7 +21,7 @@ documentation is generated.
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2327-L2327 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2341-L2341 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/skew.md b/content/flux/v0.x/stdlib/universe/skew.md index 530b63831..b48acb39a 100644 --- a/content/flux/v0.x/stdlib/universe/skew.md +++ b/content/flux/v0.x/stdlib/universe/skew.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2420-L2420 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2434-L2434 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/sort.md b/content/flux/v0.x/stdlib/universe/sort.md index e74193ec0..eb2e2ee75 100644 --- a/content/flux/v0.x/stdlib/universe/sort.md +++ b/content/flux/v0.x/stdlib/universe/sort.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2477-L2477 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2491-L2491 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md @@ -50,7 +50,7 @@ When `desc: true`, null values are first in the sort order. ### columns -List of columns to sort by. Default is ["_value"]. +List of columns to sort by. Default is `["_value"]`. Sort precedence is determined by list order (left to right). diff --git a/content/flux/v0.x/stdlib/universe/spread.md b/content/flux/v0.x/stdlib/universe/spread.md index 6a4df4390..ed499c02a 100644 --- a/content/flux/v0.x/stdlib/universe/spread.md +++ b/content/flux/v0.x/stdlib/universe/spread.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2443-L2443 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2457-L2457 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/statecount.md b/content/flux/v0.x/stdlib/universe/statecount.md index 71f8719c5..6c6ce5f40 100644 --- a/content/flux/v0.x/stdlib/universe/statecount.md +++ b/content/flux/v0.x/stdlib/universe/statecount.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3997-L3999 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4011-L4013 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/stateduration.md b/content/flux/v0.x/stdlib/universe/stateduration.md index 5ce4d3c19..05066770b 100644 --- a/content/flux/v0.x/stdlib/universe/stateduration.md +++ b/content/flux/v0.x/stdlib/universe/stateduration.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4048-L4056 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4062-L4070 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/statetracking.md b/content/flux/v0.x/stdlib/universe/statetracking.md index e8cdb4b7e..df9c15082 100644 --- a/content/flux/v0.x/stdlib/universe/statetracking.md +++ b/content/flux/v0.x/stdlib/universe/statetracking.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2546-L2556 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2560-L2570 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/stddev.md b/content/flux/v0.x/stdlib/universe/stddev.md index 0e02bc21b..2fd02d54c 100644 --- a/content/flux/v0.x/stdlib/universe/stddev.md +++ b/content/flux/v0.x/stdlib/universe/stddev.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2588-L2591 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2602-L2605 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/string.md b/content/flux/v0.x/stdlib/universe/string.md index 1634a2958..1f4fc4c0e 100644 --- a/content/flux/v0.x/stdlib/universe/string.md +++ b/content/flux/v0.x/stdlib/universe/string.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3325-L3325 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3339-L3339 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/sum.md b/content/flux/v0.x/stdlib/universe/sum.md index 2305fd78c..e35c459f2 100644 --- a/content/flux/v0.x/stdlib/universe/sum.md +++ b/content/flux/v0.x/stdlib/universe/sum.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2613-L2613 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2627-L2627 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tablefind.md b/content/flux/v0.x/stdlib/universe/tablefind.md index ee58353a8..34d17058f 100644 --- a/content/flux/v0.x/stdlib/universe/tablefind.md +++ b/content/flux/v0.x/stdlib/universe/tablefind.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2927-L2930 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2941-L2944 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tail.md b/content/flux/v0.x/stdlib/universe/tail.md index 476fe5b40..11f224570 100644 --- a/content/flux/v0.x/stdlib/universe/tail.md +++ b/content/flux/v0.x/stdlib/universe/tail.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2364-L2364 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2378-L2378 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/time.md b/content/flux/v0.x/stdlib/universe/time.md index 4ed76a7c1..357bb7066 100644 --- a/content/flux/v0.x/stdlib/universe/time.md +++ b/content/flux/v0.x/stdlib/universe/time.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3367-L3367 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3381-L3381 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/timedmovingaverage.md b/content/flux/v0.x/stdlib/universe/timedmovingaverage.md index aa2049c96..938a6cff2 100644 --- a/content/flux/v0.x/stdlib/universe/timedmovingaverage.md +++ b/content/flux/v0.x/stdlib/universe/timedmovingaverage.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4402-L4407 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4416-L4421 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/timeshift.md b/content/flux/v0.x/stdlib/universe/timeshift.md index a5655d83f..2bf593501 100644 --- a/content/flux/v0.x/stdlib/universe/timeshift.md +++ b/content/flux/v0.x/stdlib/universe/timeshift.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2398-L2398 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2412-L2412 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/timeweightedavg.md b/content/flux/v0.x/stdlib/universe/timeweightedavg.md index fc2e11412..f17e880b3 100644 --- a/content/flux/v0.x/stdlib/universe/timeweightedavg.md +++ b/content/flux/v0.x/stdlib/universe/timeweightedavg.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3638-L3648 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3652-L3662 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tobool.md b/content/flux/v0.x/stdlib/universe/tobool.md index 6492fdc57..18dc5a627 100644 --- a/content/flux/v0.x/stdlib/universe/tobool.md +++ b/content/flux/v0.x/stdlib/universe/tobool.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4749-L4749 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4763-L4763 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/today.md b/content/flux/v0.x/stdlib/universe/today.md index cc2e402a2..b200ce18a 100644 --- a/content/flux/v0.x/stdlib/universe/today.md +++ b/content/flux/v0.x/stdlib/universe/today.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4804-L4804 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4818-L4818 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tofloat.md b/content/flux/v0.x/stdlib/universe/tofloat.md index eb52c48ca..15cd11407 100644 --- a/content/flux/v0.x/stdlib/universe/tofloat.md +++ b/content/flux/v0.x/stdlib/universe/tofloat.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4722-L4722 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4736-L4736 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/toint.md b/content/flux/v0.x/stdlib/universe/toint.md index 5fed082e2..69737935f 100644 --- a/content/flux/v0.x/stdlib/universe/toint.md +++ b/content/flux/v0.x/stdlib/universe/toint.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4638-L4638 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4652-L4652 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/top.md b/content/flux/v0.x/stdlib/universe/top.md index 110cf14d2..bddfa7e2a 100644 --- a/content/flux/v0.x/stdlib/universe/top.md +++ b/content/flux/v0.x/stdlib/universe/top.md @@ -22,7 +22,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4091-L4093 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4105-L4107 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tostring.md b/content/flux/v0.x/stdlib/universe/tostring.md index b7fa9bf0b..b94000571 100644 --- a/content/flux/v0.x/stdlib/universe/tostring.md +++ b/content/flux/v0.x/stdlib/universe/tostring.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4589-L4589 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4603-L4603 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/totime.md b/content/flux/v0.x/stdlib/universe/totime.md index 120349cc9..1c81e795a 100644 --- a/content/flux/v0.x/stdlib/universe/totime.md +++ b/content/flux/v0.x/stdlib/universe/totime.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4780-L4780 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4794-L4794 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/touint.md b/content/flux/v0.x/stdlib/universe/touint.md index cb9a0a801..222c4f31c 100644 --- a/content/flux/v0.x/stdlib/universe/touint.md +++ b/content/flux/v0.x/stdlib/universe/touint.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4687-L4687 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4701-L4701 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tripleema.md b/content/flux/v0.x/stdlib/universe/tripleema.md index b6261d6ce..c387d64ea 100644 --- a/content/flux/v0.x/stdlib/universe/tripleema.md +++ b/content/flux/v0.x/stdlib/universe/tripleema.md @@ -22,7 +22,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4512-L4520 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4526-L4534 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/tripleexponentialderivative.md b/content/flux/v0.x/stdlib/universe/tripleexponentialderivative.md index 8a87b25ae..52b082da5 100644 --- a/content/flux/v0.x/stdlib/universe/tripleexponentialderivative.md +++ b/content/flux/v0.x/stdlib/universe/tripleexponentialderivative.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2661-L2667 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2675-L2681 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/truncatetimecolumn.md b/content/flux/v0.x/stdlib/universe/truncatetimecolumn.md index 584744c91..153dec49f 100644 --- a/content/flux/v0.x/stdlib/universe/truncatetimecolumn.md +++ b/content/flux/v0.x/stdlib/universe/truncatetimecolumn.md @@ -22,7 +22,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4566-L4568 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L4580-L4582 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/uint.md b/content/flux/v0.x/stdlib/universe/uint.md index 08891e469..635191661 100644 --- a/content/flux/v0.x/stdlib/universe/uint.md +++ b/content/flux/v0.x/stdlib/universe/uint.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3416-L3416 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L3430-L3430 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/union.md b/content/flux/v0.x/stdlib/universe/union.md index 0685dbab9..125a7c31c 100644 --- a/content/flux/v0.x/stdlib/universe/union.md +++ b/content/flux/v0.x/stdlib/universe/union.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2724-L2724 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2738-L2738 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/unique.md b/content/flux/v0.x/stdlib/universe/unique.md index 627b5b3c5..e197d970a 100644 --- a/content/flux/v0.x/stdlib/universe/unique.md +++ b/content/flux/v0.x/stdlib/universe/unique.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2749-L2749 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2763-L2763 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/window.md b/content/flux/v0.x/stdlib/universe/window.md index cee2d119d..02dd1146a 100644 --- a/content/flux/v0.x/stdlib/universe/window.md +++ b/content/flux/v0.x/stdlib/universe/window.md @@ -21,7 +21,7 @@ documentation is generated. To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2846-L2867 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2860-L2881 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md diff --git a/content/flux/v0.x/stdlib/universe/yield.md b/content/flux/v0.x/stdlib/universe/yield.md index 28bb2c3fa..73a65d504 100644 --- a/content/flux/v0.x/stdlib/universe/yield.md +++ b/content/flux/v0.x/stdlib/universe/yield.md @@ -21,7 +21,7 @@ documentation is generated. 
To make updates to this documentation, update the function comments above the function definition in the Flux source code: -https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2897-L2897 +https://github.com/influxdata/flux/blob/master/stdlib/universe/universe.flux#L2911-L2911 Contributing to Flux: https://github.com/influxdata/flux#contributing Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md From 07421d073bb70f8bfe005371d0c21eceb82f39ed Mon Sep 17 00:00:00 2001 From: noramullen1 <42354779+noramullen1@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:01:13 -0700 Subject: [PATCH 10/17] telegraf 1.24.1 (#4467) * telegraf 1.24.1 * Update content/telegraf/v1.24/release-notes-changelog.md Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> --- .../telegraf/v1.24/release-notes-changelog.md | 25 +++++++++++++++++++ data/products.yml | 5 ++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/content/telegraf/v1.24/release-notes-changelog.md b/content/telegraf/v1.24/release-notes-changelog.md index b156287b3..838f3da35 100644 --- a/content/telegraf/v1.24/release-notes-changelog.md +++ b/content/telegraf/v1.24/release-notes-changelog.md @@ -10,6 +10,31 @@ menu: name: Release notes weight: 60 --- +## v1.24.1 [2022-09-19] + +### Bug fixes +- Clear error message when provided config is not a text file +- Enable global confirmation for installing mingw + +### Input plugin updates +- Ceph (`ceph`): Modernize metrics. +- Modbus (`modbus`): Do not fail if a single server reports errors. +- NTPQ (`ntpq`): Handle pools with `-`. + +### Parser updates +- CSV (`csv`): Remove direct check. +- XPath (`xpath`): Add array index when expanding names. +- Fix memory leak for plugins using `ParserFunc`. +- Unwrap parsers and remove some special handling. +- `processors.parser`: Add option to parse tags + +### Dependency updates +- Update `cloud.google.com/go/pubsub` from 1.24.0 to 1.25.1. +- Update `github.com/urfave/cli/v2` from 2.14.1 to 2.16.3. +- Update `github.com/aws/aws-sdk-go-v2/service/ec2`. +- Update `github.com/wavefronthq/wavefront-sdk-go`. +- Update `cloud.google.com/go/bigquery` from 1.33.0 to 1.40.0. 
+ ## v1.24.0 [2022-09-12] ### Breaking change diff --git a/data/products.yml b/data/products.yml index d1e3f033a..09dc5a45e 100644 --- a/data/products.yml +++ b/data/products.yml @@ -37,9 +37,10 @@ telegraf: name: Telegraf namespace: telegraf list_order: 4 - versions: [v1.9, v1.10, v1.11, v1.12, v1.13, v1.14, v1.15, v1.16, v1.17, v1.18, v1.19, v1.20, v1.21, v1.22, v1.23] - latest: v1.23 + versions: [v1.9, v1.10, v1.11, v1.12, v1.13, v1.14, v1.15, v1.16, v1.17, v1.18, v1.19, v1.20, v1.21, v1.22, v1.23, v1.24] + latest: v1.24 latest_patches: + "1.24": 1 "1.23": 4 "1.22": 2 "1.21": 4 From 1d34362e3cb70a3fa87f9abba7dbc6e75cd5f8d4 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 22 Sep 2022 11:28:35 -0700 Subject: [PATCH 11/17] Update JSON Parsing Guide (#4465) * cleanup and edit JSON parsing doc * fix typo and working * Update content/influxdb/cloud/reference/json-parsing.md Co-authored-by: Scott Anderson * Update content/influxdb/cloud/reference/json-parsing.md Co-authored-by: Scott Anderson * Apply suggestions from code review Co-authored-by: Scott Anderson * Update content/influxdb/cloud/reference/json-parsing.md Co-authored-by: Scott Anderson * added data type column to tables Co-authored-by: Scott Anderson Co-authored-by: noramullen1 <42354779+noramullen1@users.noreply.github.com> --- .../influxdb/cloud/reference/json-parsing.md | 96 ++++++++++++++----- 1 file changed, 71 insertions(+), 25 deletions(-) diff --git a/content/influxdb/cloud/reference/json-parsing.md b/content/influxdb/cloud/reference/json-parsing.md index b620e5c68..ebd8b8436 100644 --- a/content/influxdb/cloud/reference/json-parsing.md +++ b/content/influxdb/cloud/reference/json-parsing.md @@ -10,45 +10,91 @@ influxdb/v2.0/tags: [mqtt] related: --- -Use the following examples to help you set up parsing rules for [native subscriptions](/influxdb/cloud/write-data/no-code/native-subscriptions). +Use the following examples to help you set up JSON parsing rules using [JSON Path](https://jsonpath.com/) +for [native subscriptions](/influxdb/cloud/write-data/no-code/native-subscriptions). All JSON paths start with a `$`. -## Example simple MQTT message in JSON format +## Example MQTT message in "flat" JSON format -```js +```json { "device_type":"temperature_sensor", "device_id":2036, "model_id":"KN24683", - "temperature":25.0, - "time":1653998899010000000, +"temperature":25.0, +"time":1653998899010000000, "error_state":"in_error" - } ``` -JSON paths start with a “$.” In the above example, all of the values are at the root level of the JSON, so the JSON paths for these elements are very simple: +With "flat" JSON, all values are at the root level (`$`) and are referenced with dot notation. 
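+For example, `$.temperature` references the top-level `temperature` key in the message above.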
-- Measurement: $.device_type
-- Timestamp: $.time
-- Tag: $.device_id
-- Field 1: $.temperature
-- Field 2: $.error_state
+| InfluxDB Element | JSON Path | Data Type | Parsed Result |
+| :--------------- | :-------------- | :-------- | :------------------- |
+| Measurement | `$.device_type` | String | "temperature_sensor" |
+| Timestamp | `$.time` | Timestamp | 1653998899010000000 |
+| Tag | `$.device_id` | Integer | 2036 |
+| Field 1 | `$.temperature` | Float | 25.0 |
+| Field 2 | `$.error_state` | String | "in_error" |
+
+## Example MQTT message with nested JSON objects
 
-## Example nested MQTT message in JSON format
-
-```js
+```json
 {
-"device_information": {
-"device_type":"temperature_sensor",
-"device_id":2036,
-"model_id":"KN24683"
-},
- "temperature":25.0,
- "time":165411795400000000,
- "error_state":"in_error"
+  "device_information": {
+    "device_type":"temperature_sensor",
+    "device_id":2036,
+    "model_id":"KN24683"
+  },
+  "temperature":25.0,
+  "time":1653998899010000000,
+  "error_state":"in_error"
 }
 ```
 
-In this example, the JSON path to the measurement would be `$.device_information.device_type`
-The JSON path to the tag would be `$device_information.device_id`.
+| InfluxDB Element | JSON Path | Data Type | Parsed Result |
+| :--------------- | :--------------------------------- | :-------- | :------------------- |
+| Measurement | `$.device_information.device_type` | String | "temperature_sensor" |
+| Timestamp | `$.time` | Timestamp | 1653998899010000000 |
+| Tag | `$.device_information.device_id` | Integer | 2036 |
+| Field 1 | `$.temperature` | Float | 25.0 |
+| Field 2 | `$.error_state` | String | "in_error" |
+
+## Example MQTT message with JSON arrays
+Support for key/value pairs held within a JSON array is currently limited.
+You can't load an entire array into a single field value, but if your messages
+contain a fixed number of values in an array, you can specify an array index number
+in your JSON path.
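+For example, `$.errors_encountered.[0].error_number` references the `error_number` value of the
+first element in the `errors_encountered` array in the following message: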
+
+
+```json
+{
+   "device_information":{
+      "device_type":"temperature_sensor",
+      "device_id":2309,
+      "model_id":"KN24683"
+   },
+   "time":1653998899010000000,
+   "temperature":25.0,
+   "error_state":"in_error",
+   "errors_encountered":[
+      {
+         "time_encountered":"2022:05:30:23:11",
+         "error_number":403
+      },
+      {
+         "time_encountered":"2022:06:01:12:15",
+         "error_number":404
+      }
+   ]
+}
+```
+
+| InfluxDB Element | JSON Path | Data Type | Parsed Result |
+| :--------------- | :-------------------------------------- | :-------- | :------------------- |
+| Measurement | `$.device_information.device_type` | String | "temperature_sensor" |
+| Timestamp | `$.time` | Timestamp | 1653998899010000000 |
+| Tag | `$.device_information.device_id` | Integer | 2309 |
+| Field 1 | `$.temperature` | Float | 25.0 |
+| Field 2 | `$.error_state` | String | "in_error" |
+| Field 3 | `$.errors_encountered.[0].error_number` | Integer | 403 |
+| Field 4 | `$.errors_encountered.[1].error_number` | Integer | 404 |

From bbe21c43c1e59b29d9b5b77c75ef409edb7c5681 Mon Sep 17 00:00:00 2001
From: Sunbrye Ly
Date: Fri, 23 Sep 2022 08:40:14 -1000
Subject: [PATCH 12/17] feat: dar-331

---
 content/influxdb/v2.4/process-data/get-started.md | 5 +++++
 .../v2.4/process-data/manage-tasks/task-run-history.md | 7 +++++++
 2 files changed, 12 insertions(+)

diff --git a/content/influxdb/v2.4/process-data/get-started.md b/content/influxdb/v2.4/process-data/get-started.md
index e3d8af1f7..5a0677176 100644
--- a/content/influxdb/v2.4/process-data/get-started.md
+++ b/content/influxdb/v2.4/process-data/get-started.md
@@ -49,10 +49,15 @@ option task = {name: "downsample_5m_precision", every: 1h, offset: 0m}
 
_See [Task configuration options](/influxdb/v2.4/process-data/task-options) for detailed information
about each option._
 
+_Note that InfluxDB doesn't guarantee that a task will run at the scheduled time.
+See [View task run logs for a task](/influxdb/v2.4/process-data/manage-tasks/task-run-history/#view-task-run-logs-with-the-influxdb-api)
+for detailed information on task service-level agreements (SLAs)._
+
{{% note %}}
The InfluxDB UI provides a form for defining task options.
{{% /note %}}
+
{{% cloud-only %}}
 
### Task options for invokable scripts
diff --git a/content/influxdb/v2.4/process-data/manage-tasks/task-run-history.md b/content/influxdb/v2.4/process-data/manage-tasks/task-run-history.md
index 796249785..9ebc5495c 100644
--- a/content/influxdb/v2.4/process-data/manage-tasks/task-run-history.md
+++ b/content/influxdb/v2.4/process-data/manage-tasks/task-run-history.md
@@ -74,3 +74,10 @@ endpoint](/influxdb/v2.4/api/#operation/GetTasksIDRunsIDLogs).
{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/logs" >}}
 
To retry failed task runs, see how to [run tasks](/influxdb/v2.4/process-data/manage-tasks/run-task/).
+
+{{% warn %}}
+InfluxDB doesn’t guarantee that a task will run at the scheduled time. During busy
+periods, tasks are added to the run queue and processed in order of submission.
+View the scheduled start time and actual start time in the logs under
+`scheduledFor` and `startedAt`.
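+For example, a run that waited in the queue might show `"scheduledFor": "2022-09-23T15:00:00Z"`
+and `"startedAt": "2022-09-23T15:03:12Z"`, indicating the run started about three minutes after
+it was scheduled (timestamps are illustrative).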
+{{% /warn %}} \ No newline at end of file From 9472a6c7fce97e801a5dd2f4dcd0ef2e204a7733 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 23 Sep 2022 14:42:19 -0600 Subject: [PATCH 13/17] Procedural documentation for the join package (#4466) * WIP task-based join docs * WIP join task-based docs * WIP prodedural join docs * WIP new join docs * WIP join docs, join diagrams * WIP join docs * WIP join docs * wrap up join docs * Apply suggestions from code review Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> * apply updates from pr review * minor updates * fixed typo, updated update-flux-versions.js script * Apply suggestions from code review Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> * Apply suggestions from code review * address PR feedback * add flux stdlib related links for join * fixed frontmatter injection Co-authored-by: kelseiv <47797004+kelseiv@users.noreply.github.com> --- assets/js/content-interactions.js | 23 +- assets/styles/layouts/article/_flex.scss | 12 +- assets/styles/layouts/article/_svgs.scss | 29 ++ content/flux/v0.x/get-started/data-model.md | 24 +- content/flux/v0.x/join-data/_index.md | 147 ++++++++++ content/flux/v0.x/join-data/full-outer.md | 252 ++++++++++++++++++ content/flux/v0.x/join-data/inner.md | 196 ++++++++++++++ content/flux/v0.x/join-data/left-outer.md | 207 ++++++++++++++ content/flux/v0.x/join-data/right-outer.md | 170 ++++++++++++ content/flux/v0.x/join-data/time.md | 205 ++++++++++++++ .../flux/v0.x/join-data/troubleshoot-joins.md | 186 +++++++++++++ data/flux_influxdb_versions.yml | 10 +- data/flux_stdlib_frontmatter.yml | 35 +++ flux-build-scripts/update-flux-versions.js | 8 + layouts/shortcodes/children.html | 3 +- layouts/shortcodes/svg.html | 16 +- static/svgs/join-diagram.svg | 10 + 17 files changed, 1493 insertions(+), 40 deletions(-) create mode 100644 content/flux/v0.x/join-data/_index.md create mode 100644 content/flux/v0.x/join-data/full-outer.md create mode 100644 content/flux/v0.x/join-data/inner.md create mode 100644 content/flux/v0.x/join-data/left-outer.md create mode 100644 content/flux/v0.x/join-data/right-outer.md create mode 100644 content/flux/v0.x/join-data/time.md create mode 100644 content/flux/v0.x/join-data/troubleshoot-joins.md create mode 100644 static/svgs/join-diagram.svg diff --git a/assets/js/content-interactions.js b/assets/js/content-interactions.js index 8398af98d..d7e6b4647 100644 --- a/assets/js/content-interactions.js +++ b/assets/js/content-interactions.js @@ -200,29 +200,16 @@ $('.tooltip').each( function(){ $(this).prepend($toolTipElement); }); -/////////////////// Style time columns in tables to not wrap /////////////////// +//////////////////// Style time cells in tables to not wrap //////////////////// $('.article--content table').each(function() { var table = $(this); - var timeColumns = ['_time', '*_time', '_start', '*_start', '_stop', '*_stop']; - let header = []; - let timeColumnIndexes = []; - // Return an array of column headers - table.find('th').each(function () { - header.push($(this)[0].innerHTML); - }); - - // Return indexes of time columns - header.forEach(function(value, i) { - if ( timeColumns.includes(value) ) { timeColumnIndexes.push(i) }; - }); - - // Add the nowrap class to cells with time column indexes table.find('td').each(function() { - if (timeColumnIndexes.includes( $(this)[0].cellIndex )) { - $(this).addClass('nowrap'); + let cellContent = $(this)[0].innerText + + if (/\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.*Z/.test(cellContent)) { + 
$(this).addClass('nowrap') } }) - }) diff --git a/assets/styles/layouts/article/_flex.scss b/assets/styles/layouts/article/_flex.scss index 5a3ebdb3b..bc0c314c0 100644 --- a/assets/styles/layouts/article/_flex.scss +++ b/assets/styles/layouts/article/_flex.scss @@ -7,19 +7,20 @@ } .flex-container { - margin-right: 1rem; + margin-right: 1.5rem; - &.half { width: calc(50% - 1rem); } - &.third { width: calc(33.33% - 1rem); } - &.quarter { width: calc(25% - 1rem); } + &.half { width: calc(50% - 1.5rem); } + &.third { width: calc(33.33% - 1.5rem); } + &.quarter { width: calc(25% - 1.5rem); } &.two-thirds { width: calc(66% - 2rem);} &.half, &.third, &.quarter { - table:not(:last-child) {margin-right: 1rem;} + table:not(:last-child) {margin-right: 1.5rem;} } img { margin-bottom: 0;} table { display: table; } + p:last-child {margin-bottom: 0.5rem;} } //////////////////////////////////////////////////////////////////////////////// @@ -32,5 +33,6 @@ &.third, &.two-thirds { width: calc(100% - 1rem); } &.quarter { width: calc(50% - 1rem); } + p:last-child {margin-bottom: 1.5rem;} } } diff --git a/assets/styles/layouts/article/_svgs.scss b/assets/styles/layouts/article/_svgs.scss index 2f8d0d316..654e23ec8 100644 --- a/assets/styles/layouts/article/_svgs.scss +++ b/assets/styles/layouts/article/_svgs.scss @@ -14,6 +14,35 @@ svg { .st1 {fill: $article-text;} .st2 {font-family: $rubik; font-weight: $medium} } + + //////////////////////////////// Join Diagram //////////////////////////////// + &#join-diagram { + $fill-color: rgba($article-text, .35); + display: block; + max-width: 250px; + margin: 1rem 0 2rem; + + &.center {margin: 0 auto 2rem auto;} + &.small {max-width: 125px; path{stroke-width: 3;} } + + path { + stroke: $article-text; + stroke-width:2; + stroke-miterlimit:10; + fill: none; + } + &.inner {path { &#center {fill:$fill-color; }}} + &.left {path { &#left, &#center {fill:$fill-color; }}} + &.right {path { &#center, &#right {fill:$fill-color; }}} + &.full {path { &#left, &#center, &#right {fill:$fill-color; }}} + } + +} + +@include media(small) { + svg { + &#join-diagram {margin: 1rem auto 2rem; } + } } //////////////////////////// Styles for SVG legends //////////////////////////// diff --git a/content/flux/v0.x/get-started/data-model.md b/content/flux/v0.x/get-started/data-model.md index 673c0e977..9270ebda9 100644 --- a/content/flux/v0.x/get-started/data-model.md +++ b/content/flux/v0.x/get-started/data-model.md @@ -39,16 +39,22 @@ that contains one value for each [row](#row). A **row** is a collection of associated [column](#column) values. #### Group key -A **group key** defines which columns and specific column values to include in a table. -All rows in a table contain the same values in group key columns. -All tables in a stream of tables have a unique group key, but group key -modifications are applied to a stream of tables. +A **group key** defines which columns to use to group tables in a stream of tables. +Each table in a stream of tables represents a unique **group key instance**. +All rows in a table contain the same values for each group key column. -##### Example group keys -Group keys contain key-value pairs, where each key represents a column name and -each value represents the column value included in the table. -The following are examples of group keys in a stream of tables with three separate tables. -Each group key represents a table containing data for a unique location: +##### Example group key +A group key can be represented by an array of column labels. 
+
+```
+[_measurement, facility, _field]
+```
+
+##### Example group key instances
+Group key instances (unique to each table) include key-value pairs that identify
+each group key column and the value common to all rows in that table.
+The following are examples of group key instances in a stream of tables with three separate tables.
+Each represents a table containing data for a unique location:
 
 ```
 [_measurement: "production", facility: "us-midwest", _field: "apq"]
diff --git a/content/flux/v0.x/join-data/_index.md b/content/flux/v0.x/join-data/_index.md
new file mode 100644
index 000000000..73d452baa
--- /dev/null
+++ b/content/flux/v0.x/join-data/_index.md
@@ -0,0 +1,147 @@
+---
+title: Join data
+seotitle: Join data with Flux
+description: >
+  Flux supports inner, full outer, left outer, and right outer joins.
+  Learn how to use the `join` package to join two data sets with common values.
+menu:
+  flux_0_x:
+    name: Join data
+weight: 8
+related:
+  - /flux/v0.x/stdlib/join/
+  - /flux/v0.x/stdlib/join/inner/
+  - /flux/v0.x/stdlib/join/left/
+  - /flux/v0.x/stdlib/join/right/
+  - /flux/v0.x/stdlib/join/full/
+  - /flux/v0.x/stdlib/join/time/
+---
+
+Use the Flux [`join` package](/flux/v0.x/stdlib/join/) to join two data sets based on common values.
+Learn how to join two data sets using the following join methods:
+
+{{< flex >}}
+{{< flex-content "quarter" >}}
+

Inner join

+ {{< svg svg="static/svgs/join-diagram.svg" class="inner small center" >}} +
+{{< /flex-content >}} +{{< flex-content "quarter" >}} + +

Left outer join

+ {{< svg svg="static/svgs/join-diagram.svg" class="left small center" >}} +
+{{< /flex-content >}} +{{< flex-content "quarter" >}} + +

Right outer join

+ {{< svg svg="static/svgs/join-diagram.svg" class="right small center" >}} +
+{{< /flex-content >}} +{{< flex-content "quarter" >}} + +

Full outer join

+ {{< svg svg="static/svgs/join-diagram.svg" class="full small center" >}} +
+{{< /flex-content >}}
+{{< /flex >}}
+
+{{% note %}}
+#### When to use the join package
+
+We recommend using the `join` package to join streams that have mostly different
+schemas or that come from two separate data sources.
+If you're joining data from the same data source with the same schema, using
+[`union()`](/flux/v0.x/stdlib/universe/union/) and [`pivot()`](/flux/v0.x/stdlib/universe/pivot/)
+to combine the data will likely be more performant.
+{{% /note %}}
+
+- [How join functions work](#how-join-functions-work)
+  - [Input streams](#input-streams)
+  - [Join predicate function (on)](#join-predicate-function-on)
+  - [Join output function (as)](#join-output-function-as)
+- [Perform join operations](#perform-join-operations)
+  {{< children type="anchored-list" filterOut="Troubleshoot join operations" >}}
+- [Troubleshoot join operations](#troubleshoot-join-operations)
+
+## How join functions work
+
+`join` functions join _two_ streams of tables together based
+on common values in each input stream.
+
+- [Input streams](#input-streams)
+- [Join predicate function (on)](#join-predicate-function-on)
+- [Join output function (as)](#join-output-function-as)
+
+### Input streams
+
+Each input stream is assigned to the `left` or `right` parameter.
+Input streams can be defined from any valid data source.
+For more information, see:
+
+- [Query data sources](/flux/v0.x/query-data/)
+- Define ad hoc tables with [`array.from()`](/flux/v0.x/stdlib/array/from/)
+
+#### Data requirements
+
+To join data, each input stream must have the following:
+
+- **One or more columns with common values to join on**.
+  Columns do not need identical labels, but they do need to have comparable values.
+- **Identical [group keys](/flux/v0.x/get-started/data-model/#group-key)**.
+  Functions in the `join` package use group keys to quickly determine what tables
+  from each input stream should be paired and evaluated for the join operation.
+  _Both input streams should have the same group key._
+  If they don't, your join operation may not find any matching tables and will
+  return unexpected output.
+  If the group keys of your input streams are not identical, use
+  [`group()`](/flux/v0.x/stdlib/universe/group/) to regroup each input
+  stream before joining them together.
+
+  {{% note %}}
+Only tables with the same [group key instance](/flux/v0.x/get-started/data-model/#example-group-key-instances)
+are joined.
+  {{% /note %}}
+
+### Join predicate function (on)
+
+`join` package functions require the `on` parameter, a [predicate function](/flux/v0.x/get-started/syntax-basics/#predicate-functions)
+that compares values from each input stream (represented by `l` (left) and `r` (right))
+and returns `true` or `false`.
+Rows that return `true` are joined.
+
+```js
+(l, r) => l.column == r.column
+```
+
+### Join output function (as)
+
+`join` package functions _(except [`join.time()`](/flux/v0.x/stdlib/join/time/))_
+require the `as` parameter to define the output schema of the join.
+The `as` parameter is a function that returns a new record using values from
+the joined rows: left (`l`) and right (`r`).
+
+```js
+(l, r) => ({l with name: r.name, location: r.location})
+```
+
+{{% note %}}
+#### Do not modify group key columns
+
+Do not modify group key columns. The `as` function must return the same group
+key as both input streams to successfully perform a join.
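+
+For example, assuming `_measurement` is part of the group key of both input streams:
+
+```js
+// Group key preserved: all columns of `l`, including `_measurement`, are returned
+(l, r) => ({l with opType: r.opType})
+
+// Group key modified: `_measurement` is dropped, so tables fail to join as expected
+(l, r) => ({stationID: l.stationID, opType: r.opType})
+```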
+{{% /note %}}
+
+## Perform join operations
+
+The `join` package supports the following join types and special use cases:
+
+{{< children type="anchored-list" filterOut="Troubleshoot join operations" >}}
+
+{{< children readmore=true filterOut="Troubleshoot join operations" >}}
+
+## Troubleshoot join operations
+
+For information about unexpected behaviors and errors when using the `join` package,
+see [Troubleshoot join operations](/flux/v0.x/join-data/troubleshoot-joins/).
diff --git a/content/flux/v0.x/join-data/full-outer.md b/content/flux/v0.x/join-data/full-outer.md
new file mode 100644
index 000000000..c20ed0cce
--- /dev/null
+++ b/content/flux/v0.x/join-data/full-outer.md
@@ -0,0 +1,252 @@
+---
+title: Perform a full outer join
+description: >
+  Use [`join.full()`](/flux/v0.x/stdlib/join/full/) to perform a full outer join of two streams of data.
+  Full outer joins output a row for all rows in both the **left** and **right** input streams
+  and join rows that match according to the `on` predicate.
+menu:
+  flux_0_x:
+    name: Full outer join
+    parent: Join data
+weight: 103
+related:
+  - /flux/v0.x/join-data/troubleshoot-joins/
+  - /flux/v0.x/stdlib/join/
+  - /flux/v0.x/stdlib/join/full/
+list_code_example: |
+  ```js
+  import "join"
+
+  left = from(bucket: "example-bucket-1") |> //...
+  right = from(bucket: "example-bucket-2") |> //...
+
+  join.full(
+      left: left,
+      right: right,
+      on: (l, r) => l.id == r.id,
+      as: (l, r) => {
+          id = if exists l.id then l.id else r.id
+
+          return {name: l.name, location: r.location, id: id}
+      },
+  )
+  ```
+---
+
+Use [`join.full()`](/flux/v0.x/stdlib/join/full/) to perform a full outer join of two streams of data.
+Full outer joins output a row for all rows in both the **left** and **right** input streams
+and join rows that match according to the `on` predicate.
+
+{{< svg svg="static/svgs/join-diagram.svg" class="full" >}}
+
+{{< expand-wrapper >}}
+{{% expand "View table illustration of a full outer join" %}}
+{{< flex >}}
+{{% flex-content "third" %}}
+#### left
+| | | |
+| :-- | :----------------------------------- | :----------------------------------- |
+| r1 | | |
+| r2 | | |
+{{% /flex-content %}}
+{{% flex-content "third" %}}
+#### right
+| | | |
+| :-- | :----------------------------------- | :----------------------------------- |
+| r1 | | |
+| r3 | | |
+| r4 | | |
+{{% /flex-content %}}
+{{% flex-content "third" %}}
+#### Full outer join result
+
+| | | | | |
+| :-- | :----------------------------------- | :----------------------------------- | :----------------------------------- | :----------------------------------- |
+| r1 | | | | |
+| r2 | | | | |
+| r3 | | | | |
+| r4 | | | | |
+{{% /flex-content %}}
+{{< /flex >}}
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## Use join.full to join your data
+
+1. Import the `join` package.
+2. Define the **left** and **right** data streams to join:
+
+    - Each stream must have one or more columns with common values.
+      Column labels do not need to match, but column values do.
+    - Each stream should have identical [group keys](/flux/v0.x/get-started/data-model/#group-key).
+
+    _For more information, see [join data requirements](/flux/v0.x/join-data/#data-requirements)._
+
+3. Use `join.full()` to join the two streams together.
+    Provide the following required parameters:
+
+    - `left`: Stream of data representing the left side of the join.
+    - `right`: Stream of data representing the right side of the join.
+    - `on`: [Join predicate](/flux/v0.x/join-data/#join-predicate-function-on).
      For example: `(l, r) => l.column == r.column`.
+    - `as`: [Join output function](/flux/v0.x/join-data/#join-output-function-as)
+      that returns a record with values from each input stream.
+
+    ##### Account for missing, non-group-key values
+
+    In a full outer join, when a row has no match in the other stream, either the
+    left (`l`) or right (`r`) record falls back to a default record, in which group
+    key columns are populated and all other columns are _null_.
+    `l` and `r` are never both default records at the same time.
+
+    To ensure non-null values are included in the output for non-group-key columns,
+    check for the existence of a value in the `l` or `r` record, and return
+    the value that exists:
+
+    ```js
+    (l, r) => {
+        id = if exists l.id then l.id else r.id
+
+        return {_time: l._time, location: r.location, id: id}
+    }
+    ```
+
+The following example uses a filtered selection from the
+[**machineProduction** sample data set](/flux/v0.x/stdlib/influxdata/influxdb/sample/data/#set)
+as the **left** data stream and an ad-hoc table created with [`array.from()`](/flux/v0.x/stdlib/array/from/)
+as the **right** data stream.
+
+{{% note %}}
+#### Example data grouping
+
+The example below ungroups the **left** stream to match the grouping of the **right** stream.
+After the two streams are joined together, the joined data is grouped by `stationID`
+and sorted by `_time`.
+{{% /note %}}
+
+```js
+import "array"
+import "influxdata/influxdb/sample"
+import "join"
+
+left =
+    sample.data(set: "machineProduction")
+        |> filter(fn: (r) => r.stationID == "g1" or r.stationID == "g2" or r.stationID == "g3")
+        |> filter(fn: (r) => r._field == "oil_temp")
+        |> limit(n: 5)
+
+right =
+    array.from(
+        rows: [
+            {station: "g1", opType: "auto", last_maintained: 2021-07-15T00:00:00Z},
+            {station: "g2", opType: "manned", last_maintained: 2021-07-02T00:00:00Z},
+            {station: "g4", opType: "auto", last_maintained: 2021-08-04T00:00:00Z},
+        ],
+    )
+
+join.full(
+    left: left |> group(),
+    right: right,
+    on: (l, r) => l.stationID == r.station,
+    as: (l, r) => {
+        stationID = if exists l.stationID then l.stationID else r.station
+
+        return {
+            stationID: stationID,
+            _time: l._time,
+            _field: l._field,
+            _value: l._value,
+            opType: r.opType,
+            maintained: r.last_maintained,
+        }
+    },
+)
+    |> group(columns: ["stationID"])
+    |> sort(columns: ["_time"])
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example input and output" %}}
+
+### Input
+
+#### left {#left-input}
+
+{{% note %}}
+_`_start` and `_stop` columns have been omitted._
+{{% /note %}}
+
+| _time | _measurement | stationID | _field | _value |
+| :---------------------- | :----------- | :-------- | :------- | -----: |
+| 2021-08-01T00:00:00Z | machinery | g1 | oil_temp | 39.1 |
+| 2021-08-01T00:00:11.51Z | machinery | g1 | oil_temp | 40.3 |
+| 2021-08-01T00:00:19.53Z | machinery | g1 | oil_temp | 40.6 |
+| 2021-08-01T00:00:25.1Z | machinery | g1 | oil_temp | 40.72 |
+| 2021-08-01T00:00:36.88Z | machinery | g1 | oil_temp | 40.8 |
+
+| _time | _measurement | stationID | _field | _value |
+| :---------------------- | :----------- | :-------- | :------- | -----: |
+| 2021-08-01T00:00:00Z | machinery | g2 | oil_temp | 40.6 |
+| 2021-08-01T00:00:27.93Z | machinery | g2 | oil_temp | 40.6 |
+| 2021-08-01T00:00:54.96Z | machinery | g2 | oil_temp | 40.6 |
+| 2021-08-01T00:01:17.27Z | machinery | g2 | oil_temp | 40.6 |
+| 2021-08-01T00:01:41.84Z | machinery | g2 | oil_temp | 40.6 |
+
+| _time | _measurement | stationID | _field |
_value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:14.46Z | machinery | g3 | oil_temp | 41.36 | +| 2021-08-01T00:00:25.29Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:38.77Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:51.2Z | machinery | g3 | oil_temp | 41.4 | + +#### right {#right-input} + +| station | opType | last_maintained | +| :------ | :----- | -------------------: | +| g1 | auto | 2021-07-15T00:00:00Z | +| g2 | manned | 2021-07-02T00:00:00Z | +| g4 | auto | 2021-08-04T00:00:00Z | + +### Output {#example-output} + +| _time | stationID | _field | _value | maintained | opType | +| :---------------------- | :-------- | :------- | -----: | :------------------- | :----- | +| 2021-08-01T00:00:00Z | g1 | oil_temp | 39.1 | 2021-07-15T00:00:00Z | auto | +| 2021-08-01T00:00:11.51Z | g1 | oil_temp | 40.3 | 2021-07-15T00:00:00Z | auto | +| 2021-08-01T00:00:19.53Z | g1 | oil_temp | 40.6 | 2021-07-15T00:00:00Z | auto | +| 2021-08-01T00:00:25.1Z | g1 | oil_temp | 40.72 | 2021-07-15T00:00:00Z | auto | +| 2021-08-01T00:00:36.88Z | g1 | oil_temp | 40.8 | 2021-07-15T00:00:00Z | auto | + +| _time | stationID | _field | _value | maintained | opType | +| :---------------------- | :-------- | :------- | -----: | :------------------- | :----- | +| 2021-08-01T00:00:00Z | g2 | oil_temp | 40.6 | 2021-07-02T00:00:00Z | manned | +| 2021-08-01T00:00:27.93Z | g2 | oil_temp | 40.6 | 2021-07-02T00:00:00Z | manned | +| 2021-08-01T00:00:54.96Z | g2 | oil_temp | 40.6 | 2021-07-02T00:00:00Z | manned | +| 2021-08-01T00:01:17.27Z | g2 | oil_temp | 40.6 | 2021-07-02T00:00:00Z | manned | +| 2021-08-01T00:01:41.84Z | g2 | oil_temp | 40.6 | 2021-07-02T00:00:00Z | manned | + +| _time | stationID | _field | _value | maintained | opType | +| :---------------------- | :-------- | :------- | -----: | :--------- | :----- | +| 2021-08-01T00:00:00Z | g3 | oil_temp | 41.4 | | | +| 2021-08-01T00:00:14.46Z | g3 | oil_temp | 41.36 | | | +| 2021-08-01T00:00:25.29Z | g3 | oil_temp | 41.4 | | | +| 2021-08-01T00:00:38.77Z | g3 | oil_temp | 41.4 | | | +| 2021-08-01T00:00:51.2Z | g3 | oil_temp | 41.4 | | | + +| _time | stationID | _field | _value | maintained | opType | +| :---- | :-------- | :----- | -----: | :------------------- | :----- | +| | g4 | | | 2021-08-04T00:00:00Z | auto | + +#### Things to note about the join output +- Because the [right stream](#right-input) does not have rows with the `g3` stationID tag, + the joined output includes rows with the `g3` stationID tag from the [left stream](#left-input) + with _null_ values in columns populated from the **right** stream. +- Because the [left stream](#left-input) does not have rows with the `g4` stationID tag, + the joined output includes rows with the `g4` stationID tag from the [right stream](#right-input) + with _null_ values in columns populated from the **left** stream. + +{{% /expand %}} +{{< /expand-wrapper >}} + diff --git a/content/flux/v0.x/join-data/inner.md b/content/flux/v0.x/join-data/inner.md new file mode 100644 index 000000000..25d36c767 --- /dev/null +++ b/content/flux/v0.x/join-data/inner.md @@ -0,0 +1,196 @@ +--- +title: Perform an inner join +description: > + Use [`join.inner()`](/flux/v0.x/stdlib/join/inner/) to perform an inner join of two streams of data. + Inner joins drop any rows from both input streams that do not have a matching + row in the other stream. 
+menu: + flux_0_x: + name: Inner join + parent: Join data +weight: 101 +related: + - /flux/v0.x/join-data/troubleshoot-joins/ + - /flux/v0.x/stdlib/join/ + - /flux/v0.x/stdlib/join/inner/ +list_code_example: | + ```js + import "join" + + left = from(bucket: "example-bucket-1") |> //... + right = from(bucket: "example-bucket-2") |> //... + + join.inner( + left: left, + right: right, + on: (l, r) => l.column == r.column, + as: (l, r) => ({l with name: r.name, location: r.location}), + ) + ``` +--- + +Use [`join.inner()`](/flux/v0.x/stdlib/join/inner/) to perform an inner join of two streams of data. +Inner joins drop any rows from both input streams that do not have a matching +row in the other stream. + +{{< svg svg="static/svgs/join-diagram.svg" class="inner" >}} + +{{< expand-wrapper >}} +{{% expand "View table illustration of an inner join" %}} +{{< flex >}} +{{% flex-content "third" %}} +#### left +| | | | +| :-- | :----------------------------------- | :----------------------------------- | +| r1 | | | +| r2 | | | +{{% /flex-content %}} +{{% flex-content "third" %}} +#### right +| | | | +| :-- | :----------------------------------- | :----------------------------------- | +| r1 | | | +| r3 | | | +| r4 | | | +{{% /flex-content %}} +{{% flex-content "third" %}} +#### Inner join result + +| | | | | | +| :-- | :----------------------------------- | :----------------------------------- | :----------------------------------- | :----------------------------------- | +| r1 | | | | | +{{% /flex-content %}} +{{< /flex >}} +{{% /expand %}} +{{< /expand-wrapper >}} + +## Use join.inner to join your data + +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/flux/v0.x/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/flux/v0.x/join-data/#data-requirements)._ + +3. Use `join.inner()` to join the two streams together. + Provide the following required parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/flux/v0.x/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. + - `as`: [Join output function](/flux/v0.x/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({l with column1: r.column1, column2: r.column2})`. + +The following example uses a filtered selection from the +[**machineProduction** sample data set](/flux/v0.x/stdlib/influxdata/influxdb/sample/data/#set) +as the **left** data stream and an ad-hoc table created with [`array.from()`](/flux/v0.x/stdlib/array/from/) +as the **right** data stream. + +{{% note %}} +#### Example data grouping + +The example below ungroups the **left** stream to match the grouping of the **right** stream. +After the two streams are joined together, the joined data is grouped by `stationID`. 
+{{% /note %}} + +```js +import "array" +import "influxdata/influxdb/sample" +import "join" + +left = + sample.data(set: "machineProduction") + |> filter(fn: (r) => r.stationID == "g1" or r.stationID == "g2" or r.stationID == "g3") + |> filter(fn: (r) => r._field == "oil_temp") + |> limit(n: 5) + +right = + array.from( + rows: [ + {station: "g1", opType: "auto", last_maintained: 2021-07-15T00:00:00Z}, + {station: "g2", opType: "manned", last_maintained: 2021-07-02T00:00:00Z}, + ], + ) + +join.inner( + left: left |> group(), + right: right, + on: (l, r) => l.stationID == r.station, + as: (l, r) => ({l with opType: r.opType, maintained: r.last_maintained}), +) + |> group(columns: ["stationID"]) +``` + +{{< expand-wrapper >}} +{{% expand "View example input and output" %}} + +{{% note %}} +_`_start` and `_stop` columns have been omitted from example input and output._ +{{% /note %}} + +### Input + +#### left {#left-input} + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g1 | oil_temp | 39.1 | +| 2021-08-01T00:00:11.51Z | machinery | g1 | oil_temp | 40.3 | +| 2021-08-01T00:00:19.53Z | machinery | g1 | oil_temp | 40.6 | +| 2021-08-01T00:00:25.1Z | machinery | g1 | oil_temp | 40.72 | +| 2021-08-01T00:00:36.88Z | machinery | g1 | oil_temp | 40.8 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:00:27.93Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:00:54.96Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:01:17.27Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:01:41.84Z | machinery | g2 | oil_temp | 40.6 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:14.46Z | machinery | g3 | oil_temp | 41.36 | +| 2021-08-01T00:00:25.29Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:38.77Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:51.2Z | machinery | g3 | oil_temp | 41.4 | + +#### right {#right-input} + +| station | opType | last_maintained | +| :------ | :----- | -------------------: | +| g1 | auto | 2021-07-15T00:00:00Z | +| g2 | manned | 2021-07-02T00:00:00Z | + +### Output {#example-output} + +| _time | _measurement | stationID | _field | _value | opType | maintained | +| :---------------------- | :----------- | :-------- | :------- | -----: | :----- | :------------------- | +| 2021-08-01T00:00:00Z | machinery | g1 | oil_temp | 39.1 | auto | 2021-07-15T00:00:00Z | +| 2021-08-01T00:00:11.51Z | machinery | g1 | oil_temp | 40.3 | auto | 2021-07-15T00:00:00Z | +| 2021-08-01T00:00:19.53Z | machinery | g1 | oil_temp | 40.6 | auto | 2021-07-15T00:00:00Z | +| 2021-08-01T00:00:25.1Z | machinery | g1 | oil_temp | 40.72 | auto | 2021-07-15T00:00:00Z | +| 2021-08-01T00:00:36.88Z | machinery | g1 | oil_temp | 40.8 | auto | 2021-07-15T00:00:00Z | + +| _time | _measurement | stationID | _field | _value | opType | maintained | +| :---------------------- | :----------- | :-------- | :------- | -----: | :----- | :------------------- | +| 2021-08-01T00:00:00Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z | +| 2021-08-01T00:00:27.93Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z | +| 
2021-08-01T00:00:54.96Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+| 2021-08-01T00:01:17.27Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+| 2021-08-01T00:01:41.84Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+
+#### Things to note about the join output
+- Because the [right stream](#right-input) does not have a row with the `g3` station tag,
+  the joined output drops all rows with the `g3` stationID tag from the [left stream](#left-input).
+  `join.inner()` drops any rows that do not have a matching row in the other
+  data stream.
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/flux/v0.x/join-data/left-outer.md b/content/flux/v0.x/join-data/left-outer.md
new file mode 100644
index 000000000..c95074feb
--- /dev/null
+++ b/content/flux/v0.x/join-data/left-outer.md
@@ -0,0 +1,207 @@
+---
+title: Perform a left outer join
+description: >
+  Use [`join.left()`](/flux/v0.x/stdlib/join/left/) to perform a left outer join of two streams of data.
+  Left joins output a row for each row in the **left** data stream with data matching
+  from the **right** data stream. If there is no matching data in the **right**
+  data stream, non-group-key columns with values from the **right** data stream are _null_.
+menu:
+  flux_0_x:
+    name: Left outer join
+    parent: Join data
+weight: 102
+related:
+  - /flux/v0.x/join-data/troubleshoot-joins/
+  - /flux/v0.x/stdlib/join/
+  - /flux/v0.x/stdlib/join/left/
+list_code_example: |
+  ```js
+  import "join"
+
+  left = from(bucket: "example-bucket-1") |> //...
+  right = from(bucket: "example-bucket-2") |> //...
+
+  join.left(
+      left: left,
+      right: right,
+      on: (l, r) => l.column == r.column,
+      as: (l, r) => ({l with name: r.name, location: r.location}),
+  )
+  ```
+---
+
+Use [`join.left()`](/flux/v0.x/stdlib/join/left/) to perform a left outer join of two streams of data.
+Left joins output a row for each row in the **left** data stream with data matching
+from the **right** data stream. If there is no matching data in the **right**
+data stream, non-group-key columns with values from the **right** data stream are _null_.
+
+{{< svg svg="static/svgs/join-diagram.svg" class="left" >}}
+
+{{< expand-wrapper >}}
+{{% expand "View table illustration of a left outer join" %}}
+{{< flex >}}
+{{% flex-content "third" %}}
+#### left
+| | | |
+| :-- | :----------------------------------- | :----------------------------------- |
+| r1 | | |
+| r2 | | |
+{{% /flex-content %}}
+{{% flex-content "third" %}}
+#### right
+| | | |
+| :-- | :----------------------------------- | :----------------------------------- |
+| r1 | | |
+| r3 | | |
+| r4 | | |
+{{% /flex-content %}}
+{{% flex-content "third" %}}
+#### Left outer join result
+
+| | | | | |
+| :-- | :----------------------------------- | :----------------------------------- | :----------------------------------- | :----------------------------------- |
+| r1 | | | | |
+| r2 | | | | |
+{{% /flex-content %}}
+{{< /flex >}}
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## Use join.left to join your data
+
+1. Import the `join` package.
+2. Define the **left** and **right** data streams to join:
+
+    - Each stream must have one or more columns with common values.
+      Column labels do not need to match, but column values do.
+    - Each stream should have identical [group keys](/flux/v0.x/get-started/data-model/#group-key).
+
+    _For more information, see [join data requirements](/flux/v0.x/join-data/#data-requirements)._
+
+3.
Use `join.left()` to join the two streams together. + Provide the following parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/flux/v0.x/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. + - `as`: [Join output function](/flux/v0.x/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({l with column1: r.column1, column2: r.column2})`. + +The following example uses a filtered selection from the +[**machineProduction** sample data set](/flux/v0.x/stdlib/influxdata/influxdb/sample/data/#set) +as the **left** data stream and an ad-hoc table created with [`array.from()`](/flux/v0.x/stdlib/array/from/) +as the **right** data stream. + +{{% note %}} +#### Example data grouping + +The example below ungroups the **left** stream to match the grouping of the **right** stream. +After the two streams are joined together, the joined data is grouped by `stationID`. +{{% /note %}} + +```js +import "array" +import "influxdata/influxdb/sample" +import "join" + +left = + sample.data(set: "machineProduction") + |> filter(fn: (r) => r.stationID == "g1" or r.stationID == "g2" or r.stationID == "g3") + |> filter(fn: (r) => r._field == "oil_temp") + |> limit(n: 5) + +right = + array.from( + rows: [ + {station: "g1", opType: "auto", last_maintained: 2021-07-15T00:00:00Z}, + {station: "g2", opType: "manned", last_maintained: 2021-07-02T00:00:00Z}, + ], + ) + +join.left( + left: left |> group(), + right: right, + on: (l, r) => l.stationID == r.station, + as: (l, r) => ({l with opType: r.opType, maintained: r.last_maintained}), +) + |> group(columns: ["stationID"]) +``` + +{{< expand-wrapper >}} +{{% expand "View example input and output" %}} + +{{% note %}} +_`_start` and `_stop` columns have been omitted from example input and output._ +{{% /note %}} + +### Input + +#### left {#left-input} + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g1 | oil_temp | 39.1 | +| 2021-08-01T00:00:11.51Z | machinery | g1 | oil_temp | 40.3 | +| 2021-08-01T00:00:19.53Z | machinery | g1 | oil_temp | 40.6 | +| 2021-08-01T00:00:25.1Z | machinery | g1 | oil_temp | 40.72 | +| 2021-08-01T00:00:36.88Z | machinery | g1 | oil_temp | 40.8 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:00:27.93Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:00:54.96Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:01:17.27Z | machinery | g2 | oil_temp | 40.6 | +| 2021-08-01T00:01:41.84Z | machinery | g2 | oil_temp | 40.6 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:14.46Z | machinery | g3 | oil_temp | 41.36 | +| 2021-08-01T00:00:25.29Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:38.77Z | machinery | g3 | oil_temp | 41.4 | +| 2021-08-01T00:00:51.2Z | machinery | g3 | oil_temp | 41.4 | + +#### right {#right-input} + +| station | opType | last_maintained | +| :------ | :----- | -------------------: | +| g1 | auto | 2021-07-15T00:00:00Z | +| g2 | manned | 
2021-07-02T00:00:00Z |
+
+### Output {#example-output}
+
+| _time | _measurement | stationID | _field | _value | opType | maintained |
+| :---------------------- | :----------- | :-------- | :------- | -----: | :----- | :------------------- |
+| 2021-08-01T00:00:00Z | machinery | g1 | oil_temp | 39.1 | auto | 2021-07-15T00:00:00Z |
+| 2021-08-01T00:00:11.51Z | machinery | g1 | oil_temp | 40.3 | auto | 2021-07-15T00:00:00Z |
+| 2021-08-01T00:00:19.53Z | machinery | g1 | oil_temp | 40.6 | auto | 2021-07-15T00:00:00Z |
+| 2021-08-01T00:00:25.1Z | machinery | g1 | oil_temp | 40.72 | auto | 2021-07-15T00:00:00Z |
+| 2021-08-01T00:00:36.88Z | machinery | g1 | oil_temp | 40.8 | auto | 2021-07-15T00:00:00Z |
+
+| _time | _measurement | stationID | _field | _value | opType | maintained |
+| :---------------------- | :----------- | :-------- | :------- | -----: | :----- | :------------------- |
+| 2021-08-01T00:00:00Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+| 2021-08-01T00:00:27.93Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+| 2021-08-01T00:00:54.96Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+| 2021-08-01T00:01:17.27Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+| 2021-08-01T00:01:41.84Z | machinery | g2 | oil_temp | 40.6 | manned | 2021-07-02T00:00:00Z |
+
+| _time | _measurement | stationID | _field | _value | opType | maintained |
+| :---------------------- | :----------- | :-------- | :------- | -----: | :----- | :--------- |
+| 2021-08-01T00:00:00Z | machinery | g3 | oil_temp | 41.4 | | |
+| 2021-08-01T00:00:14.46Z | machinery | g3 | oil_temp | 41.36 | | |
+| 2021-08-01T00:00:25.29Z | machinery | g3 | oil_temp | 41.4 | | |
+| 2021-08-01T00:00:38.77Z | machinery | g3 | oil_temp | 41.4 | | |
+| 2021-08-01T00:00:51.2Z | machinery | g3 | oil_temp | 41.4 | | |
+
+#### Things to note about the join output
+- Because the [right stream](#right-input) does not have a row with the `g3` station tag,
+  rows from the [left stream](#left-input) with the `g3` stationID tag include
+  _null_ values in columns that are populated from the right stream (`r`) in the
+  `as` parameter.
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
diff --git a/content/flux/v0.x/join-data/right-outer.md b/content/flux/v0.x/join-data/right-outer.md
new file mode 100644
index 000000000..309777ad8
--- /dev/null
+++ b/content/flux/v0.x/join-data/right-outer.md
@@ -0,0 +1,170 @@
+---
+title: Perform a right outer join
+description: >
+  Use [`join.right()`](/flux/v0.x/stdlib/join/right/) to perform a right outer join of two streams of data.
+  Right joins output a row for each row in the **right** data stream with data matching
+  from the **left** data stream. If there is no matching data in the **left**
+  data stream, non-group-key columns with values from the **left** data stream are _null_.
+menu:
+  flux_0_x:
+    name: Right outer join
+    parent: Join data
+weight: 102
+related:
+  - /flux/v0.x/join-data/troubleshoot-joins/
+  - /flux/v0.x/stdlib/join/
+  - /flux/v0.x/stdlib/join/right/
+list_code_example: |
+  ```js
+  import "join"
+
+  left = from(bucket: "example-bucket-1") |> //...
+  right = from(bucket: "example-bucket-2") |> //...
+
+  join.right(
+      left: left,
+      right: right,
+      on: (l, r) => l.column == r.column,
+      as: (l, r) => ({r with name: l.name, location: l.location}),
+  )
+  ```
+---
+
+Use [`join.right()`](/flux/v0.x/stdlib/join/right/) to perform a right outer join of two streams of data.
+Right joins output a row for each row in the **right** data stream with data matching +from the **left** data stream. If there is no matching data in the **left** +data stream, non-group-key columns with values from the **left** data stream are _null_. + +{{< svg svg="static/svgs/join-diagram.svg" class="right" >}} + +{{< expand-wrapper >}} +{{% expand "View table illustration of a right outer join" %}} +{{< flex >}} +{{% flex-content "third" %}} +#### left +| | | | +| :-- | :----------------------------------- | :----------------------------------- | +| r1 | | | +| r2 | | | +{{% /flex-content %}} +{{% flex-content "third" %}} +#### right +| | | | +| :-- | :----------------------------------- | :----------------------------------- | +| r1 | | | +| r3 | | | +| r4 | | | +{{% /flex-content %}} +{{% flex-content "third" %}} +#### Right outer join result + +| | | | | | +| :-- | :----------------------------------- | :----------------------------------- | :----------------------------------- | :----------------------------------- | +| r1 | | | | | +| r3 | | | | | +| r4 | | | | | + +{{% /flex-content %}} +{{< /flex >}} +{{% /expand %}} +{{< /expand-wrapper >}} + +## Use join.right to join your data + +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/flux/v0.x/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/flux/v0.x/join-data/#data-requirements)._ + +3. Use `join.right()` to join the two streams together. + Provide the following required parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/flux/v0.x/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. + - `as`: [Join output function](/flux/v0.x/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({r with column1: l.column1, column2: l.column2})`. + +The following example uses a filtered selection from the +[**machineProduction** sample data set](/flux/v0.x/stdlib/influxdata/influxdb/sample/data/#set) +as the **left** data stream and an ad-hoc table created with [`array.from()`](/flux/v0.x/stdlib/array/from/) +as the **right** data stream. + +{{% note %}} +#### Example data grouping + +The example below ungroups the **left** stream to match the grouping of the **right** stream. 
+{{% /note %}}
+
+```js
+import "array"
+import "influxdata/influxdb/sample"
+import "join"
+
+left =
+    sample.data(set: "machineProduction")
+        |> filter(fn: (r) => r.stationID == "g1" or r.stationID == "g2" or r.stationID == "g3")
+        |> filter(fn: (r) => r._field == "oil_temp")
+        |> last()
+
+right =
+    array.from(
+        rows: [
+            {station: "g1", opType: "auto", last_maintained: 2021-07-15T00:00:00Z},
+            {station: "g2", opType: "manned", last_maintained: 2021-07-02T00:00:00Z},
+        ],
+    )
+
+join.right(
+    left: left |> group(),
+    right: right,
+    on: (l, r) => l.stationID == r.station,
+    as: (l, r) => ({r with last_reported_val: l._value, last_reported_time: l._time}),
+)
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example input and output" %}}
+
+### Input
+
+#### left {#left-input}
+
+{{% note %}}
+_`_start` and `_stop` columns have been omitted._
+{{% /note %}}
+
+| _time | _measurement | stationID | _field | _value |
+| :---------------------- | :----------- | :-------- | :------- | -----: |
+| 2021-08-01T23:59:46.17Z | machinery | g1 | oil_temp | 40.6 |
+
+| _time | _measurement | stationID | _field | _value |
+| :---------------------- | :----------- | :-------- | :------- | -----: |
+| 2021-08-01T23:59:34.57Z | machinery | g2 | oil_temp | 41.34 |
+
+| _time | _measurement | stationID | _field | _value |
+| :---------------------- | :----------- | :-------- | :------- | -----: |
+| 2021-08-01T23:59:41.96Z | machinery | g3 | oil_temp | 41.26 |
+
+#### right {#right-input}
+
+| station | opType | last_maintained |
+| :------ | :----- | -------------------: |
+| g1 | auto | 2021-07-15T00:00:00Z |
+| g2 | manned | 2021-07-02T00:00:00Z |
+
+### Output {#example-output}
+
+| station | opType | last_maintained | last_reported_time | last_reported_val |
+| :------ | :----- | :------------------- | :---------------------- | ----------------: |
+| g1 | auto | 2021-07-15T00:00:00Z | 2021-08-01T23:59:46.17Z | 40.6 |
+| g2 | manned | 2021-07-02T00:00:00Z | 2021-08-01T23:59:34.57Z | 41.34 |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/flux/v0.x/join-data/time.md b/content/flux/v0.x/join-data/time.md
new file mode 100644
index 000000000..bc74851bc
--- /dev/null
+++ b/content/flux/v0.x/join-data/time.md
@@ -0,0 +1,205 @@
+---
+title: Join on time
+description: >
+  Use [`join.time()`](/flux/v0.x/stdlib/join/time/) to join two streams of data
+  based on time values in the `_time` column.
+  This type of join operation is common when joining two streams of
+  [time series data](/influxdb/latest/reference/glossary/#time-series-data).
+menu:
+  flux_0_x:
+    parent: Join data
+weight: 104
+related:
+  - /flux/v0.x/join-data/troubleshoot-joins/
+  - /flux/v0.x/stdlib/join/
+  - /flux/v0.x/stdlib/join/time/
+list_code_example: |
+  ```js
+  import "join"
+
+  left = from(bucket: "example-bucket-1") |> //...
+  right = from(bucket: "example-bucket-2") |> //...
+
+  join.time(
+      left: left,
+      right: right,
+      as: (l, r) => ({l with field1: l._value, field2: r._value}),
+  )
+  ```
+---
+
+Use [`join.time()`](/flux/v0.x/stdlib/join/time/) to join two streams of data
+based on time values in the `_time` column.
+This type of join operation is common when joining two streams of
+[time series data](/influxdb/latest/reference/glossary/#time-series-data).
+
+`join.time()` can use any of the available join methods.
+Which method you use depends on your desired behavior:
+
+- **inner** _(Default)_:
+  Drop any rows from both input streams that do not have a matching
+  row in the other stream.
+
+- **left**:
+  Output a row for each row in the **left** data stream with data matching
+  from the **right** data stream. If there is no matching data in the **right**
+  data stream, non-group-key columns with values from the **right** data stream
+  are _null_.
+
+- **right**:
+  Output a row for each row in the **right** data stream with data matching
+  from the **left** data stream. If there is no matching data in the **left**
+  data stream, non-group-key columns with values from the **left** data stream
+  are _null_.
+
+- **full**:
+  Output a row for all rows in both the **left** and **right** input streams
+  and join rows that match based on their `_time` value.
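+
+For example, the following minimal sketch (with hypothetical bucket names and
+an assumed numeric `_value` column in each stream) shows how the `method`
+parameter selects the join type:
+
+```js
+import "join"
+
+// Hypothetical input streams; see the data requirements below
+left = from(bucket: "example-bucket-1") |> //...
+right = from(bucket: "example-bucket-2") |> //...
+
+join.time(
+    // Join method to use: "inner" (default), "left", "right", or "full"
+    method: "left",
+    left: left,
+    right: right,
+    as: (l, r) => ({l with right_value: r._value}),
+)
+```
+
+With `method: "full"`, either `l` or `r` may be a default record, so check
+column values with `exists` in the `as` function
+(see [Perform a full outer join](/flux/v0.x/join-data/full-outer/)).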
+
+## Use join.time to join your data
+
+1. Import the `join` package.
+2. Define the **left** and **right** data streams to join:
+
+    - Each stream must have a `_time` column.
+    - Each stream must have one or more columns with common values.
+      Column labels do not need to match, but column values do.
+    - Each stream should have identical [group keys](/flux/v0.x/get-started/data-model/#group-key).
+
+    _For more information, see [join data requirements](/flux/v0.x/join-data/#data-requirements)._
+
+3. Use `join.time()` to join the two streams together.
+    Provide the following parameters:
+
+    - `left`: ({{< req >}}) Stream of data representing the left side of the join.
+    - `right`: ({{< req >}}) Stream of data representing the right side of the join.
+    - `as`: ({{< req >}}) [Join output function](/flux/v0.x/join-data/#join-output-function-as)
+      that returns a record with values from each input stream.
+      For example: `(l, r) => ({r with column1: l.column1, column2: l.column2})`.
+    - `method`: Join method to use. Default is `inner`.
+
+The following example uses filtered selections from the
+[**machineProduction** sample data set](/flux/v0.x/stdlib/influxdata/influxdb/sample/data/#set)
+as the **left** and **right** data streams.
+
+{{% note %}}
+#### Example data grouping
+
+The example below regroups both the left and right streams to remove the
+`_field` column from the group key.
+Because `join.time()` only compares tables with matching
+[group key instances](/flux/v0.x/get-started/data-model/#example-group-key-instances),
+to join streams with different `_field` column values, `_field` cannot be part
+of the group key.
+{{% /note %}} + +```js +import "influxdata/influxdb/sample" +import "join" + +left = + sample.data(set: "machineProduction") + |> filter(fn: (r) => r.stationID == "g1" or r.stationID == "g2" or r.stationID == "g3") + |> filter(fn: (r) => r._field == "pressure") + |> limit(n: 5) + |> group(columns: ["_time", "_value", "_field"], mode: "except") + +right = + sample.data(set: "machineProduction") + |> filter(fn: (r) => r.stationID == "g1" or r.stationID == "g2" or r.stationID == "g3") + |> filter(fn: (r) => r._field == "pressure_target") + |> limit(n: 5) + |> group(columns: ["_time", "_value", "_field"], mode: "except") + +join.time(method: "left", left: left, right: right, as: (l, r) => ({l with target: r._value})) +``` + +{{< expand-wrapper >}} +{{% expand "View example input and output" %}} + +### Input + +{{% note %}} +_`_start` and `_stop` columns have been omitted from input examples._ +{{% /note %}} + +#### left {#left-input} + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -------: | +| 2021-08-01T00:00:00Z | machinery | g1 | pressure | 110.2617 | +| 2021-08-01T00:00:11.51Z | machinery | g1 | pressure | 110.3506 | +| 2021-08-01T00:00:19.53Z | machinery | g1 | pressure | 110.1836 | +| 2021-08-01T00:00:25.1Z | machinery | g1 | pressure | 109.6387 | +| 2021-08-01T00:00:36.88Z | machinery | g1 | pressure | 110.5021 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -------: | +| 2021-08-01T00:00:00Z | machinery | g2 | pressure | 105.392 | +| 2021-08-01T00:00:27.93Z | machinery | g2 | pressure | 105.3786 | +| 2021-08-01T00:00:54.96Z | machinery | g2 | pressure | 105.4801 | +| 2021-08-01T00:01:17.27Z | machinery | g2 | pressure | 105.5656 | +| 2021-08-01T00:01:41.84Z | machinery | g2 | pressure | 105.5495 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :------- | -------: | +| 2021-08-01T00:00:00Z | machinery | g3 | pressure | 110.5309 | +| 2021-08-01T00:00:14.46Z | machinery | g3 | pressure | 110.3746 | +| 2021-08-01T00:00:25.29Z | machinery | g3 | pressure | 110.3719 | +| 2021-08-01T00:00:38.77Z | machinery | g3 | pressure | 110.5362 | +| 2021-08-01T00:00:51.2Z | machinery | g3 | pressure | 110.4514 | + +#### right {#right-input} + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :-------------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g1 | pressure_target | 110 | +| 2021-08-01T00:00:11.51Z | machinery | g1 | pressure_target | 110 | +| 2021-08-01T00:00:19.53Z | machinery | g1 | pressure_target | 110 | +| 2021-08-01T00:00:25.1Z | machinery | g1 | pressure_target | 110 | +| 2021-08-01T00:00:36.88Z | machinery | g1 | pressure_target | 110 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :-------------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g2 | pressure_target | 105 | +| 2021-08-01T00:00:27.93Z | machinery | g2 | pressure_target | 105 | +| 2021-08-01T00:00:54.96Z | machinery | g2 | pressure_target | 105 | +| 2021-08-01T00:01:17.27Z | machinery | g2 | pressure_target | 105 | +| 2021-08-01T00:01:41.84Z | machinery | g2 | pressure_target | 105 | + +| _time | _measurement | stationID | _field | _value | +| :---------------------- | :----------- | :-------- | :-------------- | -----: | +| 2021-08-01T00:00:00Z | machinery | g3 | 
pressure_target | 110 | +| 2021-08-01T00:00:14.46Z | machinery | g3 | pressure_target | 110 | +| 2021-08-01T00:00:25.29Z | machinery | g3 | pressure_target | 110 | +| 2021-08-01T00:00:38.77Z | machinery | g3 | pressure_target | 110 | +| 2021-08-01T00:00:51.2Z | machinery | g3 | pressure_target | 110 | + +### Output {#example-output} + +| _time | _measurement | stationID | _field | _value | target | +| :---------------------- | :----------- | :-------- | :------- | -------: | :----- | +| 2021-08-01T00:00:00Z | machinery | g1 | pressure | 110.2617 | 110 | +| 2021-08-01T00:00:11.51Z | machinery | g1 | pressure | 110.3506 | 110 | +| 2021-08-01T00:00:19.53Z | machinery | g1 | pressure | 110.1836 | 110 | +| 2021-08-01T00:00:25.1Z | machinery | g1 | pressure | 109.6387 | 110 | +| 2021-08-01T00:00:36.88Z | machinery | g1 | pressure | 110.5021 | 110 | + +| _time | _measurement | stationID | _field | _value | target | +| :---------------------- | :----------- | :-------- | :------- | -------: | :----- | +| 2021-08-01T00:00:00Z | machinery | g2 | pressure | 105.392 | 105 | +| 2021-08-01T00:00:27.93Z | machinery | g2 | pressure | 105.3786 | 105 | +| 2021-08-01T00:00:54.96Z | machinery | g2 | pressure | 105.4801 | 105 | +| 2021-08-01T00:01:17.27Z | machinery | g2 | pressure | 105.5656 | 105 | +| 2021-08-01T00:01:41.84Z | machinery | g2 | pressure | 105.5495 | 105 | + +| _time | _measurement | stationID | _field | _value | target | +| :---------------------- | :----------- | :-------- | :------- | -------: | :----- | +| 2021-08-01T00:00:00Z | machinery | g3 | pressure | 110.5309 | 110 | +| 2021-08-01T00:00:14.46Z | machinery | g3 | pressure | 110.3746 | 110 | +| 2021-08-01T00:00:25.29Z | machinery | g3 | pressure | 110.3719 | 110 | +| 2021-08-01T00:00:38.77Z | machinery | g3 | pressure | 110.5362 | 110 | +| 2021-08-01T00:00:51.2Z | machinery | g3 | pressure | 110.4514 | 110 | + +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/flux/v0.x/join-data/troubleshoot-joins.md b/content/flux/v0.x/join-data/troubleshoot-joins.md new file mode 100644 index 000000000..b7fbd9525 --- /dev/null +++ b/content/flux/v0.x/join-data/troubleshoot-joins.md @@ -0,0 +1,186 @@ +--- +title: Troubleshoot join operations +description: > + Learn how to troubleshoot common behaviors and errors that may occur when using + the [`join` package](/flux/v0.x/stdlib/join). +menu: + flux_0_x: + name: Troubleshoot joins + parent: Join data +weight: 105 +--- + +Learn how to troubleshoot common behaviors and errors that may occur when using +the [`join` package](/flux/v0.x/stdlib/join). + +{{% note %}} +#### Submit issues for unexplained behaviors or errors + +This is a "living" document that may be updated with common issues +that users may run into when using the [`join` package](/flux/v0.x/stdlib/join). +If you have questions about a behavior or error that is not documented here, +please submit an issue to either the InfluxData Documentation or Flux GitHub repositories: + +- [Submit a documentation issue](https://github.com/influxdata/docs-v2/issues/new/choose) +- [Submit a Flux issue](https://github.com/influxdata/flux/issues/new/choose) +{{% /note %}} + +- [Troubleshoot join behaviors](#troubleshoot-join-behaviors) +- [Troubleshoot join error messages](#troubleshoot-join-error-messages) + +## Troubleshoot join behaviors + +### Columns explicitly mapped in the join are null + +In some cases, your join output may include _null_ values in +columns where you expect non-null values. 
This may be caused by one of the following issues:
+
+---
+
+{{< flex class="troubleshoot-row" >}}
+{{% flex-content %}}
+#### Cause {#cause-b1}
+
+**The group keys of each input stream aren't the same.**
+Functions in the `join` package use group keys to quickly identify which tables
+should be compared.
+{{% /flex-content %}}
+{{% flex-content %}}
+#### Solution {#solution-b1}
+
+Use [`group()`](/flux/v0.x/stdlib/universe/group/) to regroup
+your two input streams so their group keys match before attempting to join
+them together.
+{{% /flex-content %}}
+{{< /flex >}}
+
+---
+
+{{< flex >}}
+{{% flex-content %}}
+#### Cause {#cause-b2}
+
+**There are no matching _group key instances_ in your data streams**.
+Functions in the `join` package only compare tables with matching
+[group key instances](/flux/v0.x/get-started/data-model/#example-group-key-instances).
+Input streams may have matching group keys, but no matching group key
+instances exist in the streams.
+
+This may happen when joining two separate fields
+queried from InfluxDB. By default, InfluxDB returns data with `_field` as part
+of the group key. If each stream contains a different field, tables in the two
+streams won't be compared because they won't have any matching _group key instances_.
+{{% /flex-content %}}
+
+{{% flex-content %}}
+#### Solution {#solution-b2}
+
+Use [`group()`](/flux/v0.x/stdlib/universe/group/) to remove
+any columns from the group keys of each input stream that would prevent
+group key instances from matching.
+{{% /flex-content %}}
+{{< /flex >}}
+
+---
+
+## Troubleshoot join error messages
+
+- [table is missing column '\<column\>'](#table-is-missing-column-column)
+- [table is missing label \<label\>](#table-is-missing-label-label)
+- [record is missing label \<label\>](#record-is-missing-label-label)
+
+### table is missing column `'<column>'`
+
+##### Error message
+```js
+cannot set join columns in left table stream: table is missing column '<column>'
+```
+
+{{< flex >}}
+{{% flex-content %}}
+#### Cause {#cause-e1}
+
+**Your `on` join predicate uses a column that doesn't exist**.
+In the `on` predicate function, you're trying to compare a column
+that doesn't exist in one of your input streams.
+{{% /flex-content %}}
+{{% flex-content %}}
+#### Solution {#solution-e1}
+
+Ensure the columns that you're comparing in the `on` predicate
+function exist in the input streams.
+If necessary, update column names in the predicate function.
+{{% /flex-content %}}
+{{< /flex >}}
+
+---
+
+### table is missing label `