diff --git a/config.staging.toml b/config.staging.toml
index 143ec999b..fadfd2e2b 100644
--- a/config.staging.toml
+++ b/config.staging.toml
@@ -25,6 +25,7 @@ hrefTargetBlank = true
 smartDashes = false
 
 [taxonomies]
+  "influxdb/v2.5/tag" = "influxdb/v2.5/tags"
   "influxdb/v2.4/tag" = "influxdb/v2.4/tags"
   "influxdb/v2.3/tag" = "influxdb/v2.3/tags"
   "influxdb/v2.2/tag" = "influxdb/v2.2/tags"
diff --git a/config.toml b/config.toml
index 84ec14705..b376af7a4 100644
--- a/config.toml
+++ b/config.toml
@@ -21,6 +21,7 @@ hrefTargetBlank = true
 smartDashes = false
 
 [taxonomies]
+  "influxdb/v2.5/tag" = "influxdb/v2.5/tags"
   "influxdb/v2.4/tag" = "influxdb/v2.4/tags"
   "influxdb/v2.3/tag" = "influxdb/v2.3/tags"
   "influxdb/v2.2/tag" = "influxdb/v2.2/tags"
diff --git a/content/influxdb/v2.4/reference/cli/influx/auth/create.md b/content/influxdb/v2.4/reference/cli/influx/auth/create.md
index 400da604c..2fbe11129 100644
--- a/content/influxdb/v2.4/reference/cli/influx/auth/create.md
+++ b/content/influxdb/v2.4/reference/cli/influx/auth/create.md
@@ -6,70 +6,17 @@ menu:
     name: influx auth create
     parent: influx auth
 weight: 201
-updated_in: CLI v2.3.0
+updated_in: CLI 2.5.0
 ---
 
 The `influx auth create` command creates an API token in InfluxDB.
-
 {{% warn %}}
-InfluxDB 2.4 introduced a bug that prevents you from creating an **all-access** or **operator** token using the `influx auth create` command, and causes the following error: `Error: could not write auth with provided arguments: 403 Forbidden: permission.`
-
-Until this bug is resolved in the next influx CLI release, please use the [workaround below to create an all-access or operator token](/influxdb/v2.4/security/tokens/create-token/#workaround-to-create-an-all-access-or-operator-token).
+**Issue resolved**: A bug in InfluxDB 2.4 and influx CLI 2.4 prevented you from creating an **all-access** or **operator** token using the `influx auth create` command. This issue is resolved in the influx CLI 2.5 release. Please [upgrade to the latest version](/influxdb/latest/tools/influx-cli/) of the influx CLI.
 {{% /warn %}}
 
-### **Workaround:** To create an all-access or operator token
-
-- Use the following command to create an [all-access](/influxdb/v2.4/security/tokens/#all-access-token) or [operator](/influxdb/v2.4/security/tokens/#operator-token) token. For an **operator** token, you must also include the `--read-orgs` and `--write-orgs` flags.
- -```sh -influx auth create - --org-id or --org \ - --read-authorizations \ - --write-authorizations \ - --read-buckets \ - --write-buckets \ - --read-dashboards \ - --write-dashboards \ - --read-tasks \ - --write-tasks \ - --read-telegrafs \ - --write-telegrafs \ - --read-users \ - --write-users \ - --read-variables \ - --write-variables \ - --read-secrets \ - --write-secrets \ - --read-labels \ - --write-labels \ - --read-views \ - --write-views \ - --read-documents \ - --write-documents \ - --read-notificationRules \ - --write-notificationRules \ - --read-notificationEndpoints \ - --write-notificationEndpoints \ - --read-checks \ - --write-checks \ - --read-dbrp \ - --write-dbrp \ - --read-annotations \ - --write-annotations \ - --read-sources \ - --write-sources \ - --read-scrapers \ - --write-scrapers \ - --read-notebooks \ - --write-notebooks \ - --read-remotes \ - --write-remotes \ - --read-replications \ - --write-replications -``` - ## Usage + ``` influx auth create [flags] ``` @@ -125,14 +72,14 @@ influx auth create [flags] {{< cli/influx-creds-note >}} -- [Create an All-Access API token](#create-an-all-access-api-token) {{% oss-only %}} or [Create an Operator API token](#create-an-operator-api-token){{% /oss-only %}} +- [Create an all-access API token](#create-an-all-access-api-token) {{% oss-only %}} or [Create an operator API token](#create-an-operator-api-token){{% /oss-only %}} - [Create an API token with specified read and write permissions](#create-an-api-token-with-specified-read-and-write-permissions) - [Create a token with read and write access to specific buckets](#create-an-api-token-with-read-and-write-access-to-specific-buckets) - [Create a read-only API token](#create-a-read-only-api-token) -### Create an All-Access API token +### Create an all-access API token -Create an [All-Access token](/influxdb/cloud/security/tokens/#all-access-token) to grant permissions to all resources in an organization. +Create an [all-access token](/influxdb/cloud/security/tokens/#all-access-token) to grant permissions to all resources in an organization. ```sh influx auth create \ @@ -141,9 +88,9 @@ influx auth create \ {{% oss-only %}} -### Create an Operator API token +### Create an operator API token -Create an [Operator token](/influxdb/v2.0/security/tokens/#operator-token) to grant permissions to all resources in all organizations. +Create an [operator token](/influxdb/v2.0/security/tokens/#operator-token) to grant permissions to all resources in all organizations. ```sh influx auth create \ diff --git a/content/influxdb/v2.4/reference/cli/influx/config/create.md b/content/influxdb/v2.4/reference/cli/influx/config/create.md index c2b27fe6e..6cdf68e67 100644 --- a/content/influxdb/v2.4/reference/cli/influx/config/create.md +++ b/content/influxdb/v2.4/reference/cli/influx/config/create.md @@ -10,7 +10,23 @@ updated_in: CLI 2.4.0 --- The `influx config create` command creates a InfluxDB connection configuration -and stores it in the `configs` file (by default, stored at `~/.influxdbv2/configs`). 
+and stores it in a local file: + +| OS/Platform | CLI config file path | +| :--------------------------- | :---------------------------------- | +| macOS | `~/.influxdbv2/configs` | +| Linux (installed as binary) | `~/.influxdbv2/configs` | +| Linux (installed as service) | `~/var/lib/influxdb/configs` | +| Windows | `%USERPROFILE%\.influxdbv2\configs` | +| Docker (DockerHub) | `/etc/influxdb2/configs` | +| Docker (Quay.io) | `/root/.influxdbv2/configs` | +| Kubernetes | `/etc/influxdb2/configs` | + +To view CLI connection configurations after creating them, use [influx config list](/influxdb/v2.4/reference/cli/influx/config/list/). + +{{% note %}} +**Note:** If you create multiple connection configurations (for example, separate admin and user configurations), use [`influx config `](/influxdb/v2.4/reference/cli/influx/config/) to switch to the configuration you want to use. +{{% /note %}} ## Usage ``` @@ -18,17 +34,17 @@ influx config create [flags] ``` ## Flags -| Flag | | Description | Input type | {{< cli/mapped >}} | -| :--- | :-------------------- | :------------------------------------------------------------------------- | :--------: | :-------------------- | -| `-a` | `--active` | Set the specified connection to be the active configuration. | | | -| `-n` | `--config-name` | ({{< req >}}) Name of the new configuration. | string | | -| `-h` | `--help` | Help for the `create` command | | | -| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | -| `-u` | `--host-url` | ({{< req >}}) Connection URL for the new configuration. | string | | -| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | -| `-o` | `--org` | Organization name | string | | -| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | -| `-p` | `--username-password` | **(OSS only)** Username (and optionally password) to use for authentication. | string | | +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :-------------------- | :------------------------------------------------------------------------------------------ | :--------: | :-------------------- | +| `-a` | `--active` | Set the specified connection to be the active configuration. | | | +| `-n` | `--config-name` | ({{< req >}}) Name of the new configuration. | string | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| `-u` | `--host-url` | ({{< req >}}) Connection URL for the new configuration. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-p` | `--username-password` | **(OSS only)** Username (and optionally password) to use for authentication. Include `username:password` to ensure a session is automatically authenticated. Include `username` (without password) to prompt for a password before creating the session. 
| string | | ## Examples @@ -37,6 +53,7 @@ influx config create [flags] - {{% oss-only %}}[Create a connection configuration that uses a username and password](#create-a-connection-configuration-that-uses-a-username-and-password){{% /oss-only %}} #### Create a connection configuration and set it active + ```sh influx config create --active \ -n config-name \ @@ -46,6 +63,7 @@ influx config create --active \ ``` #### Create a connection configuration without setting it active + ```sh influx config create \ -n config-name \ diff --git a/content/influxdb/v2.4/reference/cli/influx/config/set.md b/content/influxdb/v2.4/reference/cli/influx/config/set.md index 5162df8ff..a0cee58ad 100644 --- a/content/influxdb/v2.4/reference/cli/influx/config/set.md +++ b/content/influxdb/v2.4/reference/cli/influx/config/set.md @@ -6,6 +6,7 @@ menu: name: influx config set parent: influx config weight: 201 +updated_in: CLI 2.5.0 --- The `influx config set` command updates information in an InfluxDB connection @@ -20,15 +21,18 @@ influx config set [flags] `set` , `update` ## Flags -| Flag | | Description | Input type | {{< cli/mapped >}} | -| :--- | :--------------- | :-------------------------------------------------------------- | :--------: | :-------------------- | -| `-a` | `--active` | Set the specified connection to active | | | -| `-n` | `--config-name` | Name for the InfluxDB connection configuration to set or update | string | | -| `-h` | `--help` | Help for the `set` command | | | -| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | -| `-u` | `--host-url` | URL for InfluxDB connection configuration to set or update | string | | -| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | -| `-o` | `--org` | Organization name for the connection configuration | string | | +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :--------------- | :----------------------------------------------------------------------------------------- | :--------: | :-------------------- | +| `-a` | `--active` | Set the specified connection to active | | | +| `-n` | `--config-name` | Name for the InfluxDB connection configuration to set or update | string | | +| `-h` | `--help` | Help for the `set` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| `-u` | `--host-url` | URL for InfluxDB connection configuration to set or update | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name for the connection configuration | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-p` | `--username-password` | **(OSS only)** Username (and optionally password) to use for authentication. +Include `username:password` to ensure a session is automatically authenticated. Include `username` (without password) to prompt for a password before creating the session. | string | | ## Examples diff --git a/content/influxdb/v2.4/reference/release-notes/influx-cli.md b/content/influxdb/v2.4/reference/release-notes/influx-cli.md index 6b9f11c86..820478279 100644 --- a/content/influxdb/v2.4/reference/release-notes/influx-cli.md +++ b/content/influxdb/v2.4/reference/release-notes/influx-cli.md @@ -8,6 +8,23 @@ menu: name: influx CLI --- +# v2.5.0 [2022-10-21] + +### Features + +- Add the `--username-password` flag to [influx config set](/influxdb/v2.4/reference/cli/influx/config/set/). 
Include `username:password` after this flag to ensure a session is automatically authenticated for the config. Include `username` (without password) to prompt for a password before creating the session. + +### Maintenance + +- Upgrade to Go 1.19. +- Fix Go version in `go.mod`. + +### Bug fixes + +- Fix to allow [influx auth create](/influxdb/v2.4/reference/cli/influx/auth/create/) to successfully create an API token without error. +- Fix stack error typo. +- Fix an error where `stdin` could not be used to create tasks. + ## v2.4.0 [2022-08-18] ### Features diff --git a/content/influxdb/v2.4/write-data/delete-data.md b/content/influxdb/v2.4/write-data/delete-data.md index 3f9b5dec6..905886397 100644 --- a/content/influxdb/v2.4/write-data/delete-data.md +++ b/content/influxdb/v2.4/write-data/delete-data.md @@ -13,6 +13,7 @@ influxdb/v2.4/tags: [delete] related: - /influxdb/v2.4/reference/syntax/delete-predicate/ - /influxdb/v2.4/reference/cli/influx/delete/ + - /influxdb/v2.4/organizations/buckets/delete-bucket/ --- Use the [`influx` CLI](/influxdb/v2.4/reference/cli/influx/) or the InfluxDB API @@ -180,3 +181,5 @@ curl --request POST http://localhost:8086/api/v2/delete?org=example-org&bucket=e {{% /cloud-only %}} _For more information, see the [`/api/v2/delete` endpoint documentation](/influxdb/v2.4/api/#operation/PostDelete)._ + +To delete a bucket see [Delete a bucket](/influxdb/v2.4/organizations/buckets/delete-bucket/). \ No newline at end of file diff --git a/content/influxdb/v2.5/_index.md b/content/influxdb/v2.5/_index.md new file mode 100644 index 000000000..dccc54cae --- /dev/null +++ b/content/influxdb/v2.5/_index.md @@ -0,0 +1,19 @@ +--- +title: InfluxDB OSS 2.5 documentation +description: > + InfluxDB OSS is an open source time series database designed to handle high write and query loads. + Learn how to use and leverage InfluxDB in use cases such as monitoring metrics, IoT data, and events. +layout: landing-influxdb +menu: + influxdb_2_5: + name: InfluxDB OSS 2.5 +weight: 1 +--- + +#### Welcome +Welcome to the InfluxDB v2.5 documentation! +InfluxDB is an open source time series database designed to handle high write and query workloads. + +This documentation is meant to help you learn how to use and leverage InfluxDB to meet your needs. +Common use cases include infrastructure monitoring, IoT data collection, events handling, and more. +If your use case involves time series data, InfluxDB is purpose-built to handle it. diff --git a/content/influxdb/v2.5/admin/_index.md b/content/influxdb/v2.5/admin/_index.md new file mode 100644 index 000000000..f4da0888a --- /dev/null +++ b/content/influxdb/v2.5/admin/_index.md @@ -0,0 +1,13 @@ +--- +title: Administer InfluxDB +description: > + Use the InfluxDB API, user interface (UI), and CLIs to perform administrative + tasks in InfluxDB. +menu: influxdb_2_5 +weight: 18 +--- + +Use the InfluxDB API, user interface (UI), and CLIs to perform administrative +tasks in InfluxDB. + +{{< children >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/admin/internals/_index.md b/content/influxdb/v2.5/admin/internals/_index.md new file mode 100644 index 000000000..ba0cc0c36 --- /dev/null +++ b/content/influxdb/v2.5/admin/internals/_index.md @@ -0,0 +1,17 @@ +--- +title: Manage InfluxDB internal systems +description: > + Manage the internal systems of InfluxDB such as the Time Series Index (TSI), + the time-structured merge tree (TSM) storage engine, and the write-ahead log (WAL). 
+menu: + influxdb_2_5: + name: Manage internal systems + parent: Administer InfluxDB +weight: 20 +cascade: + influxdb/v2.5/tags: [storage, internals] +--- + +Manage InfluxDB internal systems, including the time series index (TSI), time-structured merge tree (TSM) storage engine, and write-ahead log (WAL). + +{{< children >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/admin/internals/tsi/_index.md b/content/influxdb/v2.5/admin/internals/tsi/_index.md new file mode 100644 index 000000000..9fbbcced0 --- /dev/null +++ b/content/influxdb/v2.5/admin/internals/tsi/_index.md @@ -0,0 +1,17 @@ +--- +title: Manage the InfluxDB time series index (TSI) +description: > + The InfluxDB [time series index (TSI)](/influxdb/v2.5/reference/internals/storage-engine/#time-series-index-tsi) + indexes or caches measurement and tag data to ensure queries are performant. + Use the `influxd inspect` command to manage the TSI index. +menu: + influxdb_2_5: + name: Manage TSI indexes + parent: Manage internal systems +weight: 101 +--- + +The InfluxDB [time series index (TSI)](/influxdb/v2.5/reference/internals/storage-engine/#time-series-index-tsi) +indexes or caches measurement and tag data to ensure queries are performant. + +{{< children >}} diff --git a/content/influxdb/v2.5/admin/internals/tsi/inspect.md b/content/influxdb/v2.5/admin/internals/tsi/inspect.md new file mode 100644 index 000000000..19863aeb2 --- /dev/null +++ b/content/influxdb/v2.5/admin/internals/tsi/inspect.md @@ -0,0 +1,251 @@ +--- +title: Inspect TSI indexes +description: > + Use the `influxd inspect` command to inspect the InfluxDB TSI index. +menu: + influxdb_2_5: + parent: Manage TSI indexes +related: + - /influxdb/v2.5/reference/internals/storage-engine/ + - /influxdb/v2.5/reference/internals/file-system-layout/ + - /influxdb/v2.5/reference/cli/influxd/inspect/dump-tsi/ + - /influxdb/v2.5/reference/cli/influxd/inspect/export-index/ + - /influxdb/v2.5/reference/cli/influxd/inspect/report-tsi/ +--- + +Use the `influxd inspect` command to inspect the InfluxDB [time series index (TSI)](/influxdb/v2.5/reference/internals/storage-engine/#time-series-index-tsi). + +- [Output information about TSI index files](#output-information-about-tsi-index-files) + - [Output raw series data stored in the index](#output-raw-series-data-stored-in-the-index) + - [Output measurement data stored in the index](#output-measurement-data-stored-in-the-index) +- [Export TSI index data as SQL](#export-tsi-index-data-as-sql) +- [Report the cardinality of TSI files](#report-the-cardinality-of-tsi-files) + +## Output information about TSI index files + +Use the [`influxd inspect dump-tsi` command](/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsi/) +to output low-level details about TSI index (`tsi1`) files. + +Provide the following: + +- ({{< req >}}) `--series-file` flag with the path to bucket's + [`_series` directory](/influxdb/v2.5/reference/internals/file-system-layout/#tsm-directories-and-files-layout). 
+- ({{< req >}}) Path to the shard's + [`index` directory](/influxdb/v2.5/reference/internals/file-system-layout/#tsm-directories-and-files-layout) + +```sh +influxd inspect dump-tsi \ + --series-file ~/.influxdbv2/engine/data/056d83f962a08461/_series \ + ~/.influxdbv2/engine/data/056d83f962a08461/autogen/1023/index +``` + +{{< expand-wrapper >}} +{{% expand "View example output" %}} +``` +[LOG FILE] L0-00000006.tsl +Series: 0 +Measurements: 0 +Tag Keys: 0 +Tag Values: 0 + +[INDEX FILE] L3-00000008.tsi +Measurements: 3 + Series data size: 0 (0.0b) + Bytes per series: 0.0b +Tag Keys: 15 +Tag Values: 1025 + Series: 1700 + Series data size: 0 (0.0b) + Bytes per series: 0.0b + +[LOG FILE] L0-00000010.tsl +Series: 0 +Measurements: 0 +Tag Keys: 0 +Tag Values: 0 + +[INDEX FILE] L2-00000011.tsi +Measurements: 1 + Series data size: 0 (0.0b) + Bytes per series: 0.0b +Tag Keys: 5 +Tag Values: 9 + Series: 10 + Series data size: 0 (0.0b) + Bytes per series: 0.0b +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +### Output raw series data stored in the index + +To output raw series data stored in index files, include the `--series` flag with +the `influxd inspect dump-tsi` command: + +```sh +influxd inspect dump-tsi \ + --series \ + --series-file ~/.influxdbv2/engine/data/056d83f962a08461/_series \ + ~/.influxdbv2/engine/data/056d83f962a08461/autogen/1023/index +``` + +{{< expand-wrapper >}} +{{% expand "View example output" %}} +``` +earthquake,code=6000iuad,id=us6000iuad,magType=mww,net=us,title=M\ 5.2\ -\ 101\ km\ SE\ of\ Palca\,\ Peru +earthquake,code=71377273,id=pr71377273,magType=md,net=pr,title=M\ 1.9\ -\ Puerto\ Rico\ region +earthquake,code=73794611,id=nc73794611,magType=md,net=nc,title=M\ 0.6\ -\ 13km\ ESE\ of\ Mammoth\ Lakes\,\ CA +earthquake,code=40361800,id=ci40361800,magType=ml,net=ci,title=M\ 1.3\ -\ 12km\ SE\ of\ Olancha\,\ CA +earthquake,code=6000itfk,id=us6000itfk,magType=mb,net=us,title=M\ 4.4\ -\ Mindanao\,\ Philippines +earthquake,code=2022ucrr,id=ok2022ucrr,magType=ml,net=ok,title=M\ 1.4\ -\ 4\ km\ SSE\ of\ Dover\,\ Oklahoma +earthquake,code=73792706,id=nc73792706,magType=md,net=nc,title=M\ 0.6\ -\ 7km\ W\ of\ Cobb\,\ CA +earthquake,code=6000isjn,id=us6000isjn,magType=mww,net=us,title=M\ 5.5\ -\ 69\ km\ E\ of\ Hualien\ City\,\ Taiwan +earthquake,code=022d8mp4dd,id=ak022d8mp4dd,magType=ml,net=ak,title=M\ 1.3\ -\ Southern\ Alaska +earthquake,code=022dbrb8vb,id=ak022dbrb8vb,magType=ml,net=ak,title=M\ 1.6\ -\ 37\ km\ NE\ of\ Paxson\,\ Alaska +earthquake,code=6000iu2e,id=us6000iu2e,magType=mb,net=us,title=M\ 4.1\ -\ 81\ km\ WSW\ of\ San\ Antonio\ de\ los\ Cobres\,\ Argentina +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +### Output measurement data stored in the index + +To output measurement information stored in index files, include the `--measurement` +flag with the `influxd inspect dump-tsi` command: + +```sh +influxd inspect dump-tsi \ + --measurements \ + --series-file ~/.influxdbv2/engine/data/056d83f962a08461/_series \ + ~/.influxdbv2/engine/data/056d83f962a08461/autogen/1023/index +``` + +{{< expand-wrapper >}} +{{% expand "View example output" %}} +``` +Measurement +earthquake +explosion +quarry blast + + +Measurement +earthquake +explosion +ice quake +quarry blast + + +Measurement +earthquake +explosion +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +## Export TSI index data as SQL + +Use the [`influxd inspect export-index` command](/influxdb/v2.5/reference/cli/influxd/inspect/export-index/) +to export an index in SQL format for easier inspection and debugging. 
+Provide the following: + +- `--series-path` flag with the path to the bucket's + [`_series` directory](/influxdb/v2.5/reference/internals/file-system-layout/#tsm-directories-and-files-layout). +- `--index-path` flag with the path to the shard's + [`index` directory](/influxdb/v2.5/reference/internals/file-system-layout/#tsm-directories-and-files-layout). + +```sh +influxd inspect export-index \ + --series-path ~/.influxdbv2/engine/data/056d83f962a08461/_series \ + --index-path ~/.influxdbv2/engine/data/056d83f962a08461/autogen/1023/index +``` + +{{< expand-wrapper >}} +{{% expand "View example output" %}} +```sql +CREATE TABLE IF NOT EXISTS measurement_series ( + name TEXT NOT NULL, + series_id INTEGER NOT NULL +); + +CREATE TABLE IF NOT EXISTS tag_value_series ( + name TEXT NOT NULL, + key TEXT NOT NULL, + value TEXT NOT NULL, + series_id INTEGER NOT NULL +); + +BEGIN TRANSACTION; +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26920); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26928); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26936); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26944); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26952); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26960); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26968); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26976); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26984); +INSERT INTO measurement_series (name, series_id) VALUES ('earthquake', 26992); +COMMIT; +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +## Report the cardinality of TSI files + +Use the [`influxd inspect report-tsi` command](/influxdb/v2.5/reference/cli/influxd/inspect/report-tsi/) +to output information about the cardinality of data in a bucket's index. +Provide the following: + +- `--bucket-id` with the ID of the bucket. + +```sh +influxd inspect report-tsi --bucket-id 056d83f962a08461 +``` + +{{< expand-wrapper >}} +{{% expand "View example output" %}} +``` +Summary +Database Path: /Users/scottanderson/.influxdbv2/engine/data/056d83f962a08461 +Cardinality (exact): 101698 + +Measurement Cardinality (exact) + +"earthquake" 99876 +"quarry blast" 1160 +"explosion" 589 +"ice quake" 58 +"other event" 10 +"chemical explosion" 2 +"rock burst" 1 +"sonic boom" 1 +"volcanic eruption" 1 + + +=============== +Shard ID: 452 +Path: /Users/scottanderson/.influxdbv2/engine/data/056d83f962a08461/autogen/452 +Cardinality (exact): 1644 + +Measurement Cardinality (exact) + +"earthquake" 1607 +"quarry blast" 29 +"explosion" 7 +"sonic boom" 1 +=============== + +=============== +Shard ID: 453 +Path: /Users/scottanderson/.influxdbv2/engine/data/056d83f962a08461/autogen/453 +Cardinality (exact): 2329 + +Measurement Cardinality (exact) + +"earthquake" 2298 +"quarry blast" 24 +"explosion" 7 +=============== +``` +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/influxdb/v2.5/admin/internals/tsi/rebuild-index.md b/content/influxdb/v2.5/admin/internals/tsi/rebuild-index.md new file mode 100644 index 000000000..8b0da3c7f --- /dev/null +++ b/content/influxdb/v2.5/admin/internals/tsi/rebuild-index.md @@ -0,0 +1,100 @@ +--- +title: Rebuild the TSI index +description: > + Flush and rebuild the TSI index to purge corrupt index files or remove indexed + data that is out of date. 
+menu:
+  influxdb_2_5:
+    parent: Manage TSI indexes
+weight: 201
+related:
+  - /influxdb/v2.5/reference/internals/storage-engine/
+  - /influxdb/v2.5/reference/internals/file-system-layout/
+  - /influxdb/v2.5/reference/cli/influxd/inspect/build-tsi/
+---
+
+In some cases, it may be necessary to flush and rebuild the TSI index.
+For example, purging corrupt index files or removing outdated indexed data.
+
+To rebuild your InfluxDB TSI index:
+
+1. **Stop the InfluxDB (`influxd`) process**.
+
+    {{% warn %}}
+Rebuilding the TSI index while `influxd` is running could prevent some data
+from being queryable.
+    {{% /warn %}}
+
+2. Navigate to the `data` directory in your
+   [InfluxDB engine path](/influxdb/v2.5/reference/internals/file-system-layout/).
+   _The engine path depends on your operating system or
+   [custom engine path setting](/influxdb/v2.5/reference/config-options/#engine-path)._
+
+    {{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[macOS & Linux](#)
+[Windows (PowerShell)](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```sh
+cd ~/.influxdbv2/engine/data/
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```powershell
+cd -Path "$env:USERPROFILE\.influxdbv2\engine\data"
+```
+{{% /code-tab-content %}}
+    {{< /code-tabs-wrapper >}}
+
+3. **Delete all `_series` directories in your InfluxDB `data` directory.**
+   By default, `_series` directories are stored at `/data//_series`,
+   but check for and remove `_series` directories throughout the
+   `data` directory.
+
+    {{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[macOS & Linux](#)
+[Windows (PowerShell)](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```sh
+find . -type d -name _series -exec rm -rf {} +
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```powershell
+get-childitem -Include _series -Recurse -force | Remove-Item -Force -Recurse
+```
+{{% /code-tab-content %}}
+    {{< /code-tabs-wrapper >}}
+
+
+4. **Delete all `index` directories.** By default, `index` directories are stored at
+   `/data//autogen//index`, but check for and remove
+   `index` directories throughout the `data` directory.
+
+    {{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[macOS & Linux](#)
+[Windows (PowerShell)](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```sh
+find . -type d -name index -exec rm -rf {} +
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```powershell
+get-childitem -Include index -Recurse -force | Remove-Item -Force -Recurse
+```
+{{% /code-tab-content %}}
+    {{< /code-tabs-wrapper >}}
+
+
+5. Use the [`influxd inspect build-tsi` command](/influxdb/v2.5/reference/cli/influxd/inspect/build-tsi/)
+   to rebuild the TSI index.
+
+    ```sh
+    influxd inspect build-tsi
+    ```
\ No newline at end of file
diff --git a/content/influxdb/v2.5/admin/internals/tsm/_index.md b/content/influxdb/v2.5/admin/internals/tsm/_index.md
new file mode 100644
index 000000000..3aa77c234
--- /dev/null
+++ b/content/influxdb/v2.5/admin/internals/tsm/_index.md
@@ -0,0 +1,28 @@
+---
+title: Manage InfluxDB TSM files
+description: >
+  ...
+menu:
+  influxdb_2_5:
+    name: Manage TSM files
+    parent: Manage internal systems
+weight: 101
+draft: true
+---
+
+
+
+- influxd inspect delete-tsm Deletes a measurement from a raw tsm file.
+- influxd inspect dump-tsm Dumps low-level details about tsm1 files +- influxd inspect export-lp Export TSM data as line protocol +- influxd inspect report-tsm Run TSM report +- influxd inspect verify-tombstone Verify the integrity of tombstone files +- influxd inspect verify-tsm Verifies the integrity of TSM files +- influxd inspect verify-wal Check for WAL corruption +- influxd inspect verify-tombstone Verify the integrity of tombstone files +- influxd inspect verify-seriesfile Verifies the integrity of series files. +- influxd inspect build-tsi --compact-series-file (Compact a series file without rebuilding the index) \ No newline at end of file diff --git a/content/influxdb/v2.5/admin/internals/wal/_index.md b/content/influxdb/v2.5/admin/internals/wal/_index.md new file mode 100644 index 000000000..b86c6b92a --- /dev/null +++ b/content/influxdb/v2.5/admin/internals/wal/_index.md @@ -0,0 +1,20 @@ +--- +title: Manage InfluxDB WAL files +description: > + ... +menu: + influxdb_2_5: + name: Manage WAL files + parent: Manage internal systems +weight: 101 +draft: true +--- + + + +dump-wal Dumps TSM data from WAL files +verify-wal Check for WAL corruption \ No newline at end of file diff --git a/content/influxdb/v2.5/admin/logs.md b/content/influxdb/v2.5/admin/logs.md new file mode 100644 index 000000000..f71e94b22 --- /dev/null +++ b/content/influxdb/v2.5/admin/logs.md @@ -0,0 +1,275 @@ +--- +title: Manage InfluxDB logs +description: > + Learn how to configure, manage, and process your InfluxDB logs. +menu: + influxdb_2_5: + name: Manage logs + parent: Administer InfluxDB +weight: 10 +--- + +Learn how to configure, manage, and process your InfluxDB logs: + +- [Configure your InfluxDB log location](#configure-your-influxdb-log-location) +- [Configure your log level](#configure-your-log-level) +- [Enable the Flux query log](#enable-the-flux-query-log) +- [Use external tools to manage and process logs](#use-external-tools-to-manage-and-process-logs) +- [Log formats](#log-formats) + +## Configure your InfluxDB log location + +By default, InfluxDB outputs all logs to **stdout**. To view InfluxDB logs, +view the output of the [`influxd`](/influxdb/v2.5/reference/cli/influxd/) process. + +- [Write logs to a file](#write-logs-to-a-file) +- [Logs when running InfluxDB as a service](#logs-when-running-influxdb-as-a-service) + +### Write logs to a file + +To write InfluxDB logs to a file, redirect **stdout** to a file when starting +the InfluxDB service ([`influxd`](/influxdb/v2.5/reference/cli/influxd/)). + +```sh +influxd 1> /path/to/influxdb.log +``` + +{{% note %}} +When logging to a file, InfluxDB uses the [logfmt](#logfmt) format. +{{% /note %}} + +### Logs when running InfluxDB as a service + +If you use a service manager to run InfluxDB, the service manager determines the location of logs. + +{{< tabs-wrapper >}} +{{% tabs %}} +[systemd](#) +[sysvinit](#) +{{% /tabs %}} + +{{% tab-content %}} + +Most Linux systems direct logs to the `systemd` journal. +To access these logs, use the following command: + +```sh +sudo journalctl -u influxdb.service +``` + +For more information, see the [journald.conf documentation](https://www.freedesktop.org/software/systemd/man/journald.conf.html). + +{{% /tab-content %}} + + +{{% tab-content %}} + +When InfluxDB is run as a service, **stdout** is discarded by default (sent to `/dev/null`). +To write logs to a file: + +1. Open the InfluxDB startup script (`/etc/default/influxdb`) in a text editor. +2. 
Set the `STDOUT` environment variable to the path where you want to store + the InfluxDB logs. For example: + + ```conf + STDOUT=/var/log/influxdb/influxd.log + ``` + +3. Save the changes to the startup script. +4. Restart the InfluxDB service to apply the changes. + + ```sh + service influxdb restart + ``` + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +## Configure your log level + +Use the [`log-level` InfluxDB configuration option](/influxdb/v2.5/reference/config-options/#log-level) +to specify the log levels the InfluxDB service outputs. +InfluxDB supports the following log levels: + +- **debug**: Output logs with debug, info, and error log levels. +- **info**: _(Default)_ Output logs with info and error log levels. +- **error**: Output logs with the error log level only. + +{{< tabs-wrapper >}} +{{% tabs "small" %}} +[influxd flag](#) +[Environment variable](#) +[InfluxDB configuration file](#) +{{% /tabs %}} + +{{% tab-content %}} +```sh +influxd --log-level=info +``` +{{% /tab-content %}} + +{{% tab-content %}} +```sh +export INFLUXD_LOG_LEVEL=info +``` +{{% /tab-content %}} + +{{% tab-content %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +log-level: info +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +log-level = "info" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "log-level": "info" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +_For information about configuring InfluxDB, see [InfluxDB configuration options](/influxdb/v2.5/reference/config-options/)._ + +## Enable the Flux query log + +Use the [`flux-log-enabled` configuration option](/influxdb/v2.5/reference/config-options/#flux-log-enabled) +to enable Flux query logging. InfluxDB outputs Flux query logs to **stdout** +with all other InfluxDB logs. + +{{< tabs-wrapper >}} +{{% tabs "small" %}} +[influxd flag](#) +[Environment variable](#) +[InfluxDB configuration file](#) +{{% /tabs %}} + +{{% tab-content %}} +```sh +influxd --flux-log-enabled +``` +{{% /tab-content %}} + +{{% tab-content %}} +```sh +export INFLUXD_FLUX_LOG_ENABLED=true +``` +{{% /tab-content %}} + +{{% tab-content %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +flux-log-enabled: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +flux-log-enabled = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "flux-log-enabled": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +_For information about configuring InfluxDB, see [InfluxDB configuration options](/influxdb/v2.5/reference/config-options/)._ + + +## Use external tools to manage and process logs + +Use the following popular tools to manage and process InfluxDB logs: + +### logrotate + +[logrotate](https://github.com/logrotate/logrotate) simplifies the +administration of log files and provides automatic rotation compression, removal +and mailing of log files. Logrotate can be set to handle a log file hourly, +daily, weekly, monthly or when the log file gets to a certain size. 
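
For example, the following is a minimal sketch of a logrotate rule for an InfluxDB log file.
It assumes you redirect **stdout** to `/var/log/influxdb/influxd.log` as shown in the sysvinit example above;
the file path and rotation settings are illustrative, not required values.

```sh
# Create /etc/logrotate.d/influxdb with a basic daily rotation policy.
# copytruncate lets logrotate rotate the file without restarting influxd,
# because influxd only writes to the redirected stdout stream.
cat > /etc/logrotate.d/influxdb <<'EOF'
/var/log/influxdb/influxd.log {
  daily
  rotate 7
  compress
  missingok
  notifempty
  copytruncate
}
EOF
```
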
+ +### hutils + +[hutils](https://blog.heroku.com/hutils-explore-your-structured-data-logs) is a +collection of command line utilities for working with logs with [logfmt](#logfmt) +encoding, including: + +- **lcut**: Extracts values from a logfmt trace based on a specified field name. +- **lfmt**: Reformats and highlights key sections of logfmt lines. +- **ltap**: Accesses messages from log providers in a consistent way to allow + easy parsing by other utilities that operate on logfmt traces. +- **lviz**: Visualizes logfmt output by building a tree out of a dataset + combining common sets of key-value pairs into shared parent nodes. + +### lnav (Log File Navigator) + +[lnav (Log File Navigator)](http://lnav.org/) is an advanced log file viewer useful for watching +and analyzing log files from a terminal. +The lnav viewer provides a single log view, automatic log format detection, +filtering, timeline view, pretty-print view, and querying logs using SQL. + + +## Log formats + +InfluxDB outputs logs in one of two formats depending on the location of where +logs are output. + +- [Console/TTY](#consoletty) +- [logfmt](#logfmt) + +### Console/TTY + +**When logging to a terminal or other TTY devices**, InfluxDB uses a console-friendly format. + +##### Example console/TTY format +```sh +2022-09-29T21:58:29.936355Z info Welcome to InfluxDB {"log_id": "0dEoz3C0000", "version": "dev", "commit": "663d43d210", "build_date": "2022-09-29T21:58:29Z", "log_level": "info"} +2022-09-29T21:58:29.977671Z info Resources opened {"log_id": "0dEoz3C0000", "service": "bolt", "path": "/Users/exampleuser/.influxdbv2/influxd.bolt"} +2022-09-29T21:58:29.977891Z info Resources opened {"log_id": "0dEoz3C0000", "service": "sqlite", "path": "/Users/exampleuser/.influxdbv2/influxd.sqlite"} +2022-09-29T21:58:30.059709Z info Checking InfluxDB metadata for prior version. {"log_id": "0dEoz3C0000", "bolt_path": "/Users/exampleuser/.influxdbv2/influxd.bolt"} +``` + +### logfmt + +**When logging to a file**, InfluxDB uses **logfmt**, a machine-readable +structured log format that provides simpler integrations with external tools like +[Splunk](https://www.splunk.com/), [Papertrail](https://www.papertrail.com/), +[Elasticsearch](https://www.elastic.co/), and other third party tools. + +##### Example logfmt format +```sh +ts=2022-09-29T16:54:16.021427Z lvl=info msg="Welcome to InfluxDB" log_id=0dEYZvqG000 version=dev commit=663d43d210 build_date=2022-09-29T16:54:15Z log_level=info +ts=2022-09-29T16:54:16.062239Z lvl=info msg="Resources opened" log_id=0dEYZvqG000 service=bolt path=/Users/exampleuser/.influxdbv2/influxd.bolt +ts=2022-09-29T16:54:16.062457Z lvl=info msg="Resources opened" log_id=0dEYZvqG000 service=sqlite path=/Users/exampleuser/.influxdbv2/influxd.sqlite +ts=2022-09-29T16:54:16.144430Z lvl=info msg="Checking InfluxDB metadata for prior version." log_id=0dEYZvqG000 bolt_path=/Users/exampleuser/.influxdbv2/influxd.bolt +``` diff --git a/content/influxdb/v2.5/api-guide/_index.md b/content/influxdb/v2.5/api-guide/_index.md new file mode 100644 index 000000000..fca04185b --- /dev/null +++ b/content/influxdb/v2.5/api-guide/_index.md @@ -0,0 +1,41 @@ +--- +title: Develop with the InfluxDB API +seotitle: Use the InfluxDB API +description: Interact with InfluxDB 2.5 using a rich API for writing and querying data and more. +weight: 4 +menu: + influxdb_2_5: + name: Develop with the API +influxdb/v2.5/tags: [api] +--- + +The InfluxDB v2 API provides a programmatic interface for interactions with InfluxDB. 
+Access the InfluxDB API using the `/api/v2/` endpoint. + +## Developer guides + +- [API Quick Start](/influxdb/v2.5/api-guide/api_intro/) + +## InfluxDB client libraries + +InfluxDB client libraries are language-specific packages that integrate with the InfluxDB v2 API. +For tutorials and information about client libraries, see [InfluxDB client libraries](/{{< latest "influxdb" >}}/api-guide/client-libraries/). + +## InfluxDB v2 API documentation + +InfluxDB OSS {{< current-version >}} API documentation + +### View InfluxDB API documentation locally + +InfluxDB API documentation is built into the `influxd` service and represents +the API specific to the current version of InfluxDB. +To view the API documentation locally, [start InfluxDB](/influxdb/v2.5/get-started/#start-influxdb) +and visit the `/docs` endpoint in a browser ([localhost:8086/docs](http://localhost:8086/docs)). + +## InfluxDB v1 compatibility API documentation + +The InfluxDB v2 API includes [InfluxDB 1.x compatibility endpoints](/influxdb/v2.5/reference/api/influxdb-1x/) +that work with InfluxDB 1.x client libraries and third-party integrations like +[Grafana](https://grafana.com) and others. + +View full v1 compatibility API documentation diff --git a/content/influxdb/v2.5/api-guide/api_intro.md b/content/influxdb/v2.5/api-guide/api_intro.md new file mode 100644 index 000000000..9de4d54bf --- /dev/null +++ b/content/influxdb/v2.5/api-guide/api_intro.md @@ -0,0 +1,75 @@ +--- +title: API Quick Start +seotitle: Use the InfluxDB API +description: Interact with InfluxDB using a rich API for writing and querying data and more. +weight: 3 +menu: + influxdb_2_5: + name: Quick start + parent: Develop with the API +aliases: + - /influxdb/v2.5/tools/api/ +influxdb/cloud/tags: [api] +--- + +InfluxDB offers a rich API and [client libraries](/influxdb/v2.5/api-guide/client-libraries) ready to integrate with your application. Use popular tools like Curl and [Postman](/influxdb/v2.5/api-guide/postman) for rapidly testing API requests. + +This section will guide you through the most commonly used API methods. + +For detailed documentation on the entire API, see [InfluxDBv2 API Reference](/influxdb/v2.5/reference/api/#influxdb-v2-api-documentation). + +{{% note %}} +If you need to use InfluxDB {{< current-version >}} with **InfluxDB 1.x** API clients and integrations, see the [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). +{{% /note %}} + +## Bootstrap your application + +With most API requests, you'll need to provide a minimum of your InfluxDB URL, Organization, and Authorization Token. + +[Install InfluxDB OSS v2.x](/influxdb/v2.5/install/) or upgrade to +an [InfluxDB Cloud account](/influxdb/cloud/sign-up). + +### Authentication + +InfluxDB uses [API tokens](/influxdb/v2.5/security/tokens/) to authorize API requests. + +1. Before exploring the API, use the InfluxDB UI to +[create an initial API token](/influxdb/v2.5/security/tokens/create-token/) for your application. + +2. Include your API token in an `Authentication: Token YOUR_API_TOKEN` HTTP header with each request. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v2.0/auth/oss/token-auth.sh" %}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v2.0/auth/oss/token-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Postman is another popular tool for exploring APIs. 
See how to [send authenticated requests with Postman](/{{< latest "influxdb" >}}/api-guide/postman/#send-authenticated-api-requests-with-postman). + +## Buckets API + +Before writing data you'll need to create a Bucket in InfluxDB. +[Create a bucket](/influxdb/v2.5/organizations/buckets/create-bucket/#create-a-bucket-using-the-influxdb-api) using an HTTP request to the InfluxDB API `/buckets` endpoint. + +```sh +{{% get-shared-text "api/v2.0/buckets/oss/create.sh" %}} +``` + +## Write API + +[Write data to InfluxDB](/influxdb/v2.5/write-data/developer-tools/api/) using an HTTP request to the InfluxDB API `/api/v2/write` endpoint. + +## Query API + +[Query from InfluxDB](/influxdb/v2.5/query-data/execute-queries/influx-api/) using an HTTP request to the `/api/v2/query` endpoint. diff --git a/content/influxdb/v2.5/api-guide/client-libraries/_index.md b/content/influxdb/v2.5/api-guide/client-libraries/_index.md new file mode 100644 index 000000000..53e09f52d --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/_index.md @@ -0,0 +1,27 @@ +--- +title: Use InfluxDB client libraries +description: > + InfluxDB client libraries are language-specific tools that integrate with the InfluxDB v2 API. + View the list of available client libraries. +weight: 101 +aliases: + - /influxdb/v2.5/reference/client-libraries/ + - /influxdb/v2.5/reference/api/client-libraries/ + - /influxdb/v2.5/tools/client-libraries/ + - /influxdb/v2.x/api-guide/client-libraries/ +menu: + influxdb_2_5: + name: Client libraries + parent: Develop with the API +influxdb/v2.5/tags: [client libraries] +--- + +InfluxDB client libraries are language-specific packages that integrate with the InfluxDB v2 API. +The following **InfluxDB v2** client libraries are available: + +{{% note %}} +These client libraries are in active development and may not be feature-complete. +This list will continue to grow as more client libraries are released. +{{% /note %}} + +{{< children type="list" >}} diff --git a/content/influxdb/v2.5/api-guide/client-libraries/arduino.md b/content/influxdb/v2.5/api-guide/client-libraries/arduino.md new file mode 100644 index 000000000..bcd9dfa79 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/arduino.md @@ -0,0 +1,21 @@ +--- +title: Arduino client library +seotitle: Use the InfluxDB Arduino client library +list_title: Arduino +description: Use the InfluxDB Arduino client library to interact with InfluxDB. +external_url: https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino +list_note: _– contributed by [tobiasschuerg](https://github.com/tobiasschuerg)_ +menu: + influxdb_2_5: + name: Arduino + parent: Client libraries + params: + url: https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino +weight: 201 +--- + +Arduino is an open-source hardware and software platform used for building electronics projects. + +The documentation for this client library is available on GitHub. + +Arduino InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/browserjs.md b/content/influxdb/v2.5/api-guide/client-libraries/browserjs.md new file mode 100644 index 000000000..0c2f5e474 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/browserjs.md @@ -0,0 +1,117 @@ +--- +title: JavaScript client library for web browsers +seotitle: Use the InfluxDB JavaScript client library for web browsers +list_title: JavaScript for browsers +description: > + Use the InfluxDB JavaScript client library to interact with InfluxDB in web clients. 
+menu: + influxdb_2_5: + name: JavaScript for browsers + identifier: client_js_browsers + parent: Client libraries +influxdb/v2.5/tags: [client libraries, JavaScript] +weight: 201 +aliases: + - /influxdb/v2.5/reference/api/client-libraries/browserjs/ + - /influxdb/v2.5/api-guide/client-libraries/browserjs/write + - /influxdb/v2.5/api-guide/client-libraries/browserjs/query +related: + - /influxdb/v2.5/api-guide/client-libraries/nodejs/write/ + - /influxdb/v2.5/api-guide/client-libraries/nodejs/query/ +--- + +Use the [InfluxDB JavaScript client library](https://github.com/influxdata/influxdb-client-js) to interact with the InfluxDB API in browsers and front-end clients. This library supports both front-end and server-side environments and provides the following distributions: +* ECMAScript modules (ESM) and CommonJS modules (CJS) +* Bundled ESM +* Bundled UMD + +This guide presumes some familiarity with JavaScript, browser environments, and InfluxDB. +If you're just getting started with InfluxDB, see [Get started with InfluxDB](/{{% latest "influxdb" %}}/get-started/). + +{{% warn %}} +### Tokens in production applications +{{% api/browser-token-warning %}} +{{% /warn %}} + +* [Before you begin](#before-you-begin) +* [Use with module bundlers](#use-with-module-bundlers) +* [Use bundled distributions with browsers and module loaders](#use-bundled-distributions-with-browsers-and-module-loaders) +* [Get started with the example app](#get-started-with-the-example-app) + +## Before you begin + +1. Install [Node.js](https://nodejs.org/en/download/package-manager/) to serve your front-end app. + +2. Ensure that InfluxDB is running and you can connect to it. + For information about what URL to use to connect to InfluxDB OSS or InfluxDB Cloud, see [InfluxDB URLs](/{{% latest "influxdb" %}}/reference/urls/). + +## Use with module bundlers + +If you use a module bundler like Webpack or Parcel, install `@influxdata/influxdb-client-browser`. +For more information and examples, see [Node.js](/{{% latest "influxdb" %}}/api-guide/client-libraries/nodejs/). + +## Use bundled distributions with browsers and module loaders + +1. Configure InfluxDB properties for your script. + + ```html + + ``` + +2. Import modules from the latest client library browser distribution. +`@influxdata/influxdb-client-browser` exports bundled ESM and UMD syntaxes. + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [ESM](#import-esm) + [UMD](#import-umd) + {{% /code-tabs %}} + {{% code-tab-content %}} + ```html + + ``` + {{% /code-tab-content %}} + {{% code-tab-content %}} + ```html + + + ``` + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +After you've imported the client library, you're ready to [write data](/{{% latest "influxdb" %}}/api-guide/client-libraries/nodejs/write/?t=nodejs) to InfluxDB. + +## Get started with the example app + +This library includes an example browser app that queries from and writes to your InfluxDB instance. + +1. Clone the [influxdb-client-js](https://github.com/influxdata/influxdb-client-js) repo. + +2. Navigate to the `examples` directory: + ```js + cd examples + ``` + +3. Update `./env_browser.js` with your InfluxDB [url](/{{% latest "influxdb" %}}/reference/urls/), [bucket](/{{% latest "influxdb" %}}/organizations/buckets/), [organization](/{{% latest "influxdb" %}}/organizations/), and [token](/{{% latest "influxdb" %}}/security/tokens/) + +4. 
Run the following command to start the application at [http://localhost:3001/examples/index.html]() + + ```sh + npm run browser + ``` + + `index.html` loads the `env_browser.js` configuration, the client library ESM modules, and the application in your browser. diff --git a/content/influxdb/v2.5/api-guide/client-libraries/csharp.md b/content/influxdb/v2.5/api-guide/client-libraries/csharp.md new file mode 100644 index 000000000..279d3fd7b --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/csharp.md @@ -0,0 +1,20 @@ +--- +title: C# client library +list_title: C# +seotitle: Use the InfluxDB C# client library +description: Use the InfluxDB C# client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-csharp +menu: + influxdb_2_5: + name: C# + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-csharp +weight: 201 +--- + +C# is a general-purpose object-oriented programming language. + +The documentation for this client library is available on GitHub. + +C# InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/dart.md b/content/influxdb/v2.5/api-guide/client-libraries/dart.md new file mode 100644 index 000000000..2cdbcaa49 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/dart.md @@ -0,0 +1,20 @@ +--- +title: Dart client library +list_title: Dart +seotitle: Use the InfluxDB Dart client library +description: Use the InfluxDB Dart client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-dart +menu: + influxdb_2_5: + name: Dart + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-dart +weight: 201 +--- + +Dart is a programming language created for quick application development for both web and mobile apps. + +The documentation for this client library is available on GitHub. + +Dart InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/go.md b/content/influxdb/v2.5/api-guide/client-libraries/go.md new file mode 100644 index 000000000..1fe3a05e2 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/go.md @@ -0,0 +1,206 @@ +--- +title: Go client library +seotitle: Use the InfluxDB Go client library +list_title: Go +description: > + Use the InfluxDB Go client library to interact with InfluxDB. +menu: + influxdb_2_5: + name: Go + parent: Client libraries +influxdb/v2.5/tags: [client libraries, Go] +weight: 201 +aliases: + - /influxdb/v2.5/reference/api/client-libraries/go/ + - /influxdb/v2.5/tools/client-libraries/go/ +--- + +Use the [InfluxDB Go client library](https://github.com/influxdata/influxdb-client-go) to integrate InfluxDB into Go scripts and applications. + +This guide presumes some familiarity with Go and InfluxDB. +If just getting started, see [Get started with InfluxDB](/influxdb/v2.5/get-started/). + +## Before you begin + +1. [Install Go 1.13 or later](https://golang.org/doc/install). +2. Add the client package your to your project dependencies. + + ```sh + # Add InfluxDB Go client package to your project go.mod + go get github.com/influxdata/influxdb-client-go/v2 + ``` +3. Ensure that InfluxDB is running and you can connect to it. + For information about what URL to use to connect to InfluxDB OSS or InfluxDB Cloud, see [InfluxDB URLs](/influxdb/v2.5/reference/urls/). 
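
One quick way to confirm the connection details from step 3 before writing any Go code is to call the InfluxDB `/health` endpoint.
The URL below assumes a local InfluxDB OSS instance on the default port; substitute your own URL or InfluxDB Cloud region as needed.

```sh
# Returns a JSON status payload (for example, "status": "pass") when the instance is reachable
curl --silent http://localhost:8086/health
```
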
+ +## Boilerplate for the InfluxDB Go Client Library + +Use the Go library to write and query data from InfluxDB. + +1. In your Go program, import the necessary packages and specify the entry point of your executable program. + + ```go + package main + + import ( + "context" + "fmt" + "time" + + "github.com/influxdata/influxdb-client-go/v2" + ) + ``` + +2. Define variables for your InfluxDB [bucket](/influxdb/v2.5/organizations/buckets/), [organization](/influxdb/v2.5/organizations/), and [token](/influxdb/v2.5/security/tokens/). + + ```go + bucket := "example-bucket" + org := "example-org" + token := "example-token" + // Store the URL of your InfluxDB instance + url := "http://localhost:8086" + ``` + +3. Create the the InfluxDB Go client and pass in the `url` and `token` parameters. + + ```go + client := influxdb2.NewClient(url, token) + ``` + +4. Create a **write client** with the `WriteAPIBlocking` method and pass in the `org` and `bucket` parameters. + + ```go + writeAPI := client.WriteAPIBlocking(org, bucket) + ``` + +5. To query data, create an InfluxDB **query client** and pass in your InfluxDB `org`. + + ```go + queryAPI := client.QueryAPI(org) + ``` + +## Write data to InfluxDB with Go + +Use the Go library to write data to InfluxDB. + +1. Create a [point](/influxdb/v2.5/reference/glossary/#point) and write it to InfluxDB using the `WritePoint` method of the API writer struct. + +2. Close the client to flush all pending writes and finish. + + ```go + p := influxdb2.NewPoint("stat", + map[string]string{"unit": "temperature"}, + map[string]interface{}{"avg": 24.5, "max": 45}, + time.Now()) + writeAPI.WritePoint(context.Background(), p) + client.Close() + ``` + +### Complete example write script + +```go +func main() { + bucket := "example-bucket" + org := "example-org" + token := "example-token" + // Store the URL of your InfluxDB instance + url := "http://localhost:8086" + // Create new client with default option for server url authenticate by token + client := influxdb2.NewClient(url, token) + // User blocking write client for writes to desired bucket + writeAPI := client.WriteAPIBlocking(org, bucket) + // Create point using full params constructor + p := influxdb2.NewPoint("stat", + map[string]string{"unit": "temperature"}, + map[string]interface{}{"avg": 24.5, "max": 45}, + time.Now()) + // Write point immediately + writeAPI.WritePoint(context.Background(), p) + // Ensures background processes finishes + client.Close() +} +``` +## Query data from InfluxDB with Go +Use the Go library to query data to InfluxDB. + +1. Create a Flux query and supply your `bucket` parameter. + + ```js + from(bucket:"") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "stat") + ``` + + The query client sends the Flux query to InfluxDB and returns the results as a FluxRecord object with a table structure. + +**The query client includes the following methods:** + +- `Query`: Sends the Flux query to InfluxDB. +- `Next`: Iterates over the query response. +- `TableChanged`: Identifies when the group key changes. +- `Record`: Returns the last parsed FluxRecord and gives access to value and row properties. +- `Value`: Returns the actual field value. 
+ +```go +result, err := queryAPI.Query(context.Background(), `from(bucket:"") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "stat")`) +if err == nil { + for result.Next() { + if result.TableChanged() { + fmt.Printf("table: %s\n", result.TableMetadata().String()) + } + fmt.Printf("value: %v\n", result.Record().Value()) + } + if result.Err() != nil { + fmt.Printf("query parsing error: %s\n", result.Err().Error()) + } +} else { + panic(err) +} +``` + +**The FluxRecord object includes the following methods for accessing your data:** + +- `Table()`: Returns the index of the table the record belongs to. +- `Start()`: Returns the inclusive lower time bound of all records in the current table. +- `Stop()`: Returns the exclusive upper time bound of all records in the current table. +- `Time()`: Returns the time of the record. +- `Value() `: Returns the actual field value. +- `Field()`: Returns the field name. +- `Measurement()`: Returns the measurement name of the record. +- `Values()`: Returns a map of column values. +- `ValueByKey()`: Returns a value from the record for given column key. + +### Complete example query script + +```go + func main() { + // Create client + client := influxdb2.NewClient(url, token) + // Get query client + queryAPI := client.QueryAPI(org) + // Get QueryTableResult + result, err := queryAPI.Query(context.Background(), `from(bucket:"my-bucket")|> range(start: -1h) |> filter(fn: (r) => r._measurement == "stat")`) + if err == nil { + // Iterate over query response + for result.Next() { + // Notice when group key has changed + if result.TableChanged() { + fmt.Printf("table: %s\n", result.TableMetadata().String()) + } + // Access data + fmt.Printf("value: %v\n", result.Record().Value()) + } + // Check for an error + if result.Err() != nil { + fmt.Printf("query parsing error: %s\n", result.Err().Error()) + } + } else { + panic(err) + } + // Ensures background processes finishes + client.Close() +} +``` + +For more information, see the [Go client README on GitHub](https://github.com/influxdata/influxdb-client-go). diff --git a/content/influxdb/v2.5/api-guide/client-libraries/java.md b/content/influxdb/v2.5/api-guide/client-libraries/java.md new file mode 100644 index 000000000..37706b9fd --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/java.md @@ -0,0 +1,20 @@ +--- +title: Java client library +seotitle: Use the InfluxDB Java client library +list_title: Java +description: Use the Java client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-java +menu: + influxdb_2_5: + name: Java + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-java +weight: 201 +--- + +Java is one of the oldest and most popular class-based, object-oriented programming languages. + +The documentation for this client library is available on GitHub. + +Java InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/kotlin.md b/content/influxdb/v2.5/api-guide/client-libraries/kotlin.md new file mode 100644 index 000000000..79eb31c33 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/kotlin.md @@ -0,0 +1,20 @@ +--- +title: Kotlin client library +seotitle: Use the Kotlin client library +list_title: Kotlin +description: Use the InfluxDB Kotlin client library to interact with InfluxDB. 
+external_url: https://github.com/influxdata/influxdb-client-java/tree/master/client-kotlin +menu: + influxdb_2_5: + name: Kotlin + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-java/tree/master/client-kotlin +weight: 201 +--- + +Kotlin is an open-source programming language that runs on the Java Virtual Machine (JVM). + +The documentation for this client library is available on GitHub. + +Kotlin InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/nodejs/_index.md b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/_index.md new file mode 100644 index 000000000..7e1560778 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/_index.md @@ -0,0 +1,23 @@ +--- +title: Node.js JavaScript client library +seotitle: Use the InfluxDB JavaScript client library +list_title: Node.js +description: > + Use the InfluxDB Node.js JavaScript client library to interact with InfluxDB. +menu: + influxdb_2_5: + name: Node.js + parent: Client libraries +influxdb/v2.5/tags: [client libraries, JavaScript] +weight: 201 +aliases: + - /influxdb/v2.5/reference/api/client-libraries/nodejs/ + - /influxdb/v2.5/reference/api/client-libraries/js/ +--- + +Use the [InfluxDB JavaScript client library](https://github.com/influxdata/influxdb-client-js) to integrate InfluxDB into your Node.js application. +In this guide, you'll start a Node.js project from scratch and code some simple API operations. + +{{< children >}} + +{{% api/v2dot0/nodejs/learn-more %}} diff --git a/content/influxdb/v2.5/api-guide/client-libraries/nodejs/install.md b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/install.md new file mode 100644 index 000000000..98bbe0ff2 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/install.md @@ -0,0 +1,97 @@ +--- +title: Install the InfluxDB JavaScript client library +seotitle: Install the InfluxDB Node.js JavaScript client library +description: > + Install the JavaScript client library to interact with the InfluxDB API in Node.js. +menu: + influxdb_2_5: + name: Install + parent: Node.js +influxdb/v2.5/tags: [client libraries, JavaScript] +weight: 100 +aliases: + - /influxdb/v2.5/reference/api/client-libraries/nodejs/install +--- + + +## Install Node.js + +1. Install [Node.js](https://nodejs.org/en/download/package-manager/). + +2. Ensure that InfluxDB is running and you can connect to it. + For information about what URL to use to connect to InfluxDB OSS or InfluxDB Cloud, see [InfluxDB URLs](/influxdb/v2.5/reference/urls/). + +3. Start a new Node.js project. + The `npm` package manager is included with Node.js. + + ```sh + npm init -y influx-node-app + ``` + +## Install TypeScript + +Many of the client library examples use [TypeScript](https://www.typescriptlang.org/). Follow these steps to initialize the TypeScript project. + +1. Install TypeScript and type definitions for Node.js. + + ```sh + npm i -g typescript && npm i --save-dev @types/node + ``` +2. Create a TypeScript configuration with default values. + + ```sh + tsc --init + ``` +3. Run the TypeScript compiler. To recompile your code automatically as you make changes, pass the `watch` flag to the compiler. + + ```sh + tsc -w -p + ``` + +## Install dependencies + +The JavaScript client library contains two packages: `@influxdata/influxdb-client` and `@influxdata/influxdb-client-apis`. +Add both as dependencies of your project. + +1. 
Open a new terminal window and install `@influxdata/influxdb-client` for querying and writing data:

+
+    ```sh
+    npm install --save @influxdata/influxdb-client
+    ```
+
+2. Install `@influxdata/influxdb-client-apis` for access to the InfluxDB management APIs:
+
+    ```sh
+    npm install --save @influxdata/influxdb-client-apis
+    ```
+
+## Next steps
+
+Once you've installed the JavaScript client library, you're ready to [write data](/influxdb/v2.5/api-guide/client-libraries/nodejs/write/) to InfluxDB or [get started](#get-started-with-examples) with other examples from the client library.
+
+## Get started with examples
+
+{{% note %}}
+The client examples include an [`env`](https://github.com/influxdata/influxdb-client-js/blob/master/examples/env.js) module for accessing your InfluxDB properties from environment variables or from `env.js`.
+The examples use these properties to interact with the InfluxDB API.
+{{% /note %}}

+1. Set environment variables or update `env.js` with your InfluxDB [bucket](/influxdb/v2.5/organizations/buckets/), [organization](/influxdb/v2.5/organizations/), [token](/influxdb/v2.5/security/tokens/), and [url](/influxdb/v2.5/reference/urls/).
+
+    ```sh
+    export INFLUX_URL=http://localhost:8086
+    export INFLUX_TOKEN=YOUR_API_TOKEN
+    export INFLUX_ORG=YOUR_ORG
+    export INFLUX_BUCKET=YOUR_BUCKET
+    ```
+    Replace the following:
+    - *`YOUR_API_TOKEN`*: InfluxDB API token
+    - *`YOUR_ORG`*: InfluxDB organization ID
+    - *`YOUR_BUCKET`*: InfluxDB bucket name
+
+2. Run an example script.
+
+    ```sh
+    query.ts
+    ```
+{{% api/v2dot0/nodejs/learn-more %}} diff --git a/content/influxdb/v2.5/api-guide/client-libraries/nodejs/query.md b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/query.md new file mode 100644 index 000000000..d2d313abe --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/query.md @@ -0,0 +1,94 @@
+---
+title: Query data with the InfluxDB JavaScript client library
+description: >
+  Use the JavaScript client library to query data with the InfluxDB API in Node.js.
+menu:
+  influxdb_2_5:
+    name: Query
+    parent: Node.js
+influxdb/v2.5/tags: [client libraries, JavaScript]
+weight: 201
+aliases:
+  - /influxdb/v2.5/reference/api/client-libraries/nodejs/query
+---
+
+Use the [InfluxDB JavaScript client library](https://github.com/influxdata/influxdb-client-js) in a Node.js environment to query InfluxDB.
+
+The following example sends a Flux query to an InfluxDB bucket and outputs rows from an observable table.
+
+## Before you begin
+
+- [Install the client library and other dependencies](/influxdb/v2.5/api-guide/client-libraries/nodejs/install/).
+
+## Query InfluxDB
+
+1. Change to your new project directory and create a file for your query module.
+
+    ```sh
+    cd influx-node-app && touch query.js
+    ```
+
+2. Instantiate an `InfluxDB` client. Provide your InfluxDB URL and API token.
+    Use the `getQueryApi()` method of the client.
+    Provide your InfluxDB organization ID to create a configured **query client**.
+
+    ```js
+    import { InfluxDB, Point } from '@influxdata/influxdb-client'
+
+    const queryApi = new InfluxDB({url: YOUR_URL, token: YOUR_API_TOKEN}).getQueryApi(YOUR_ORG)
+    ```
+
+    Replace the following:
+    - *`YOUR_URL`*: InfluxDB URL
+    - *`YOUR_API_TOKEN`*: InfluxDB API token
+    - *`YOUR_ORG`*: InfluxDB organization ID
+
+3. Create a Flux query for your InfluxDB bucket. Store the query as a string variable.
+    {{% warn %}}
+    To prevent injection attacks, avoid concatenating unsafe user input with queries.
+    {{% /warn %}}
+
+    ```js
+    const fluxQuery = `from(bucket: "YOUR_BUCKET")
+      |> range(start: 0)
+      |> filter(fn: (r) => r._measurement == "temperature")`
+    ```
+    Replace *`YOUR_BUCKET`* with the name of your InfluxDB bucket.
+
+4. Use the `queryRows()` method of the query client to query InfluxDB.
+    `queryRows()` takes a Flux query and an [RxJS **Observer**](http://reactivex.io/rxjs/manual/overview.html#observer) object.
+    The client returns [table](/{{% latest "influxdb" %}}/reference/syntax/annotated-csv/#tables) metadata and rows as an [RxJS **Observable**](http://reactivex.io/rxjs/manual/overview.html#observable).
+    `queryRows()` subscribes your observer to the observable.
+    Finally, the observer logs the rows from the response to the terminal.
+
+    ```js
+    const observer = {
+      next(row, tableMeta) {
+        const o = tableMeta.toObject(row)
+        console.log(
+          `${o._time} ${o._measurement} in '${o.location}' (${o.sensor_id}): ${o._field}=${o._value}`
+        )
+      }
+    }
+
+    queryApi.queryRows(fluxQuery, observer)
+    ```
+
+### Complete example
+
+```js
+{{% get-shared-text "api/v2.0/query/query.mjs" %}}
+```
+
+To run the example from a file, set your InfluxDB environment variables and use `node` to execute the JavaScript file.
+
+```sh
+export INFLUX_URL=http://localhost:8086 && \
+export INFLUX_TOKEN=YOUR_API_TOKEN && \
+export INFLUX_ORG=YOUR_ORG && \
+node query.js
+```
+
+{{% api/v2dot0/nodejs/learn-more %}} diff --git a/content/influxdb/v2.5/api-guide/client-libraries/nodejs/write.md b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/write.md new file mode 100644 index 000000000..7a342ff71 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/nodejs/write.md @@ -0,0 +1,117 @@
+---
+title: Write data with the InfluxDB JavaScript client library
+description: >
+  Use the JavaScript client library to write data with the InfluxDB API in Node.js.
+menu:
+  influxdb_2_5:
+    name: Write
+    parent: Node.js
+influxdb/v2.5/tags: [client libraries, JavaScript]
+weight: 101
+aliases:
+  - /influxdb/v2.5/reference/api/client-libraries/nodejs/write
+related:
+  - /influxdb/v2.5/write-data/troubleshoot/
+---
+
+Use the [InfluxDB JavaScript client library](https://github.com/influxdata/influxdb-client-js) to write data from a Node.js environment to InfluxDB.
+
+The JavaScript client library includes the following convenient features for writing data to InfluxDB:
+
+- Apply default tags to data points.
+- Buffer points into batches to optimize data transfer.
+- Automatically retry requests on failure.
+- Set an optional HTTP proxy address for your network.
+
+### Before you begin
+
+- [Install the client library and other dependencies](/influxdb/v2.5/api-guide/client-libraries/nodejs/install/).
+
+### Write data with the client library
+
+1. Instantiate an `InfluxDB` client. Provide your InfluxDB URL and API token.
+
+    ```js
+    import {InfluxDB, Point} from '@influxdata/influxdb-client'
+
+    const influxDB = new InfluxDB({url: YOUR_URL, token: YOUR_API_TOKEN})
+    ```
+    Replace the following:
+    - *`YOUR_URL`*: InfluxDB URL
+    - *`YOUR_API_TOKEN`*: InfluxDB API token
+
+2. Use the `getWriteApi()` method of the client to create a **write client**.
+    Provide your InfluxDB organization ID and bucket name.
+
+    ```js
+    const writeApi = influxDB.getWriteApi(YOUR_ORG, YOUR_BUCKET)
+    ```
+    Replace the following:
+    - *`YOUR_ORG`*: InfluxDB organization ID
+    - *`YOUR_BUCKET`*: InfluxDB bucket name
+
+3. To apply one or more [tags](/influxdb/v2.5/reference/glossary/#tag) to all points, use the `useDefaultTags()` method.
+ Provide tags as an object of key/value pairs. + + ```js + writeApi.useDefaultTags({region: 'west'}) + ``` + +4. Use the `Point()` constructor to create a [point](/influxdb/v2.5/reference/glossary/#point). + 1. Call the constructor and provide a [measurement](/influxdb/v2.5/reference/glossary/#measurement). + 2. To add one or more tags, chain the `tag()` method to the constructor. + Provide a `name` and `value`. + 3. To add a field of type `float`, chain the `floatField()` method to the constructor. + Provide a `name` and `value`. + + ```js + const point1 = new Point('temperature') + .tag('sensor_id', 'TLM010') + .floatField('value', 24) + ``` + +5. Use the `writePoint()` method to write the point to your InfluxDB bucket. + Finally, use the `close()` method to flush all pending writes. + The example logs the new data point followed by "WRITE FINISHED" to stdout. + + ```js + writeApi.writePoint(point1) + + writeApi.close().then(() => { + console.log('WRITE FINISHED') + }) + ``` + +### Complete example + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} + +```sh +{{< get-shared-text "api/v2.0/write/write.sh" >}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + +```js +{{< get-shared-text "api/v2.0/write/write.mjs" >}} +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +To run the example from a file, set your InfluxDB environment variables and use `node` to execute the JavaScript file. + +```sh +export INFLUX_URL=http://localhost:8086 && \ +export INFLUX_TOKEN=YOUR_API_TOKEN && \ +export INFLUX_ORG=YOUR_ORG && \ +export INFLUX_BUCKET=YOUR_BUCKET && \ +node write.js +``` + +### Response codes +_For information about **InfluxDB API response codes**, see +[InfluxDB API Write documentation](/influxdb/cloud/api/#operation/PostWrite)._ diff --git a/content/influxdb/v2.5/api-guide/client-libraries/php.md b/content/influxdb/v2.5/api-guide/client-libraries/php.md new file mode 100644 index 000000000..3a3d00ffa --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/php.md @@ -0,0 +1,20 @@ +--- +title: PHP client library +seotitle: Use the InfluxDB PHP client library +list_title: PHP +description: Use the InfluxDB PHP client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-php +menu: + influxdb_2_5: + name: PHP + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-php +weight: 201 +--- + +PHP is a popular general-purpose scripting language primarily used for web development. + +The documentation for this client library is available on GitHub. + +PHP InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/python.md b/content/influxdb/v2.5/api-guide/client-libraries/python.md new file mode 100644 index 000000000..0f0ff662f --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/python.md @@ -0,0 +1,193 @@ +--- +title: Python client library +seotitle: Use the InfluxDB Python client library +list_title: Python +description: > + Use the InfluxDB Python client library to interact with InfluxDB. 
+menu: + influxdb_2_5: + name: Python + parent: Client libraries +influxdb/v2.5/tags: [client libraries, python] +aliases: + - /influxdb/v2.5/reference/api/client-libraries/python/ + - /influxdb/v2.5/reference/api/client-libraries/python-cl-guide/ + - /influxdb/v2.5/tools/client-libraries/python/ +weight: 201 +--- + +Use the [InfluxDB Python client library](https://github.com/influxdata/influxdb-client-python) to integrate InfluxDB into Python scripts and applications. + +This guide presumes some familiarity with Python and InfluxDB. +If just getting started, see [Get started with InfluxDB](/influxdb/v2.5/get-started/). + +## Before you begin + +1. Install the InfluxDB Python library: + + ```sh + pip install influxdb-client + ``` + +2. Ensure that InfluxDB is running. + If running InfluxDB locally, visit http://localhost:8086. + (If using InfluxDB Cloud, visit the URL of your InfluxDB Cloud UI. + For example: https://us-west-2-1.aws.cloud2.influxdata.com.) + +## Write data to InfluxDB with Python + +We are going to write some data in [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) using the Python library. + +1. In your Python program, import the InfluxDB client library and use it to write data to InfluxDB. + + ```python + import influxdb_client + from influxdb_client.client.write_api import SYNCHRONOUS + ``` + +2. Define a few variables with the name of your [bucket](/influxdb/v2.5/organizations/buckets/), [organization](/influxdb/v2.5/organizations/), and [token](/influxdb/v2.5/security/tokens/). + + ```python + bucket = "" + org = "" + token = "" + # Store the URL of your InfluxDB instance + url="http://localhost:8086" + ``` + +3. Instantiate the client. The `InfluxDBClient` object takes three named parameters: `url`, `org`, and `token`. Pass in the named parameters. + + ```python + client = influxdb_client.InfluxDBClient( + url=url, + token=token, + org=org + ) + ``` + The `InfluxDBClient` object has a `write_api` method used for configuration. + +4. Instantiate a **write client** using the `client` object and the `write_api` method. Use the `write_api` method to configure the writer object. + + ```python + write_api = client.write_api(write_options=SYNCHRONOUS) + ``` + +5. Create a [point](/influxdb/v2.5/reference/glossary/#point) object and write it to InfluxDB using the `write` method of the API writer object. The write method requires three parameters: `bucket`, `org`, and `record`. + + ```python + p = influxdb_client.Point("my_measurement").tag("location", "Prague").field("temperature", 25.3) + write_api.write(bucket=bucket, org=org, record=p) + ``` + +### Complete example write script + +```python +import influxdb_client +from influxdb_client.client.write_api import SYNCHRONOUS + +bucket = "" +org = "" +token = "" +# Store the URL of your InfluxDB instance +url="http://localhost:8086" + +client = influxdb_client.InfluxDBClient( + url=url, + token=token, + org=org +) + +# Write script +write_api = client.write_api(write_options=SYNCHRONOUS) + +p = influxdb_client.Point("my_measurement").tag("location", "Prague").field("temperature", 25.3) +write_api.write(bucket=bucket, org=org, record=p) +``` +## Query data from InfluxDB with Python + +1. Instantiate the **query client**. + + ```python + query_api = client.query_api() + ``` + +2. Create a Flux query, and then format it as a Python string. 
+ + ```python + query = 'from(bucket:"my-bucket")\ + |> range(start: -10m)\ + |> filter(fn:(r) => r._measurement == "my_measurement")\ + |> filter(fn:(r) => r.location == "Prague")\ + |> filter(fn:(r) => r._field == "temperature")' + ``` + + The query client sends the Flux query to InfluxDB and returns a Flux object with a table structure. + +3. Pass the `query()` method two named parameters:`org` and `query`. + + ```python + result = query_api.query(org=org, query=query) + ``` + +4. Iterate through the tables and records in the Flux object. + - Use the `get_value()` method to return values. + - Use the `get_field()` method to return fields. + + ```python + results = [] + for table in result: + for record in table.records: + results.append((record.get_field(), record.get_value())) + + print(results) + [(temperature, 25.3)] + ``` + +**The Flux object provides the following methods for accessing your data:** + +- `get_measurement()`: Returns the measurement name of the record. +- `get_field()`: Returns the field name. +- `get_value()`: Returns the actual field value. +- `values`: Returns a map of column values. +- `values.get("")`: Returns a value from the record for given column. +- `get_time()`: Returns the time of the record. +- `get_start()`: Returns the inclusive lower time bound of all records in the current table. +- `get_stop()`: Returns the exclusive upper time bound of all records in the current table. + + +### Complete example query script + +```python +import influxdb_client +from influxdb_client.client.write_api import SYNCHRONOUS + +bucket = "" +org = "" +token = "" +# Store the URL of your InfluxDB instance +url="http://localhost:8086" + +client = influxdb_client.InfluxDBClient( + url=url, + token=token, + org=org +) + +# Query script +query_api = client.query_api() +query = 'from(bucket:"my-bucket")\ +|> range(start: -10m)\ +|> filter(fn:(r) => r._measurement == "my_measurement")\ +|> filter(fn:(r) => r.location == "Prague")\ +|> filter(fn:(r) => r._field == "temperature")' +result = query_api.query(org=org, query=query) +results = [] +for table in result: + for record in table.records: + results.append((record.get_field(), record.get_value())) + +print(results) +[(temperature, 25.3)] +``` + +For more information, see the [Python client README on GitHub](https://github.com/influxdata/influxdb-client-python). diff --git a/content/influxdb/v2.5/api-guide/client-libraries/r.md b/content/influxdb/v2.5/api-guide/client-libraries/r.md new file mode 100644 index 000000000..fb99497c5 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/r.md @@ -0,0 +1,20 @@ +--- +title: R package client library +list_title: R +seotitle: Use the InfluxDB client R package +description: Use the InfluxDB client R package to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-r +menu: + influxdb_2_5: + name: R + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-r +weight: 201 +--- + +R is a programming language and software environment for statistical analysis, reporting, and graphical representation primarily used in data science. + +The documentation for this client library is available on GitHub. 
+ +R InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/ruby.md b/content/influxdb/v2.5/api-guide/client-libraries/ruby.md new file mode 100644 index 000000000..a932bb76f --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/ruby.md @@ -0,0 +1,20 @@ +--- +title: Ruby client library +seotitle: Use the InfluxDB Ruby client library +list_title: Ruby +description: Use the InfluxDB Ruby client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-ruby +menu: + influxdb_2_5: + name: Ruby + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-ruby +weight: 201 +--- + +Ruby is a highly flexible, open-source, object-oriented programming language. + +The documentation for this client library is available on GitHub. + +Ruby InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/scala.md b/content/influxdb/v2.5/api-guide/client-libraries/scala.md new file mode 100644 index 000000000..d00a53b79 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/scala.md @@ -0,0 +1,20 @@ +--- +title: Scala client library +seotitle: Use the InfluxDB Scala client library +list_title: Scala +description: Use the InfluxDB Scala client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-java/tree/master/client-scala +menu: + influxdb_2_5: + name: Scala + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-java/tree/master/client-scala +weight: 201 +--- + +Scala is a general-purpose programming language that supports both object-oriented and functional programming. + +The documentation for this client library is available on GitHub. + +Scala InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/client-libraries/swift.md b/content/influxdb/v2.5/api-guide/client-libraries/swift.md new file mode 100644 index 000000000..0faebfe59 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/client-libraries/swift.md @@ -0,0 +1,20 @@ +--- +title: Swift client library +seotitle: Use the InfluxDB Swift client library +list_title: Swift +description: Use the InfluxDB Swift client library to interact with InfluxDB. +external_url: https://github.com/influxdata/influxdb-client-swift +menu: + influxdb_2_5: + name: Swift + parent: Client libraries + params: + url: https://github.com/influxdata/influxdb-client-swift +weight: 201 +--- + +Swift is a programming language created by Apple for building applications accross multiple Apple platforms. + +The documentation for this client library is available on GitHub. + +Swift InfluxDB client \ No newline at end of file diff --git a/content/influxdb/v2.5/api-guide/postman.md b/content/influxdb/v2.5/api-guide/postman.md new file mode 100644 index 000000000..97d803dc9 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/postman.md @@ -0,0 +1,57 @@ +--- +title: Use Postman with the InfluxDB API +description: > + Use [Postman](https://www.postman.com/), a popular tool for exploring APIs, + to interact with the [InfluxDB API](/influxdb/v2.5/api-guide/). +menu: + influxdb_2_5: + parent: Tools & integrations + name: Use Postman +weight: 105 +influxdb/v2.5/tags: [api, authentication] +aliases: + - /influxdb/v2.5/reference/api/postman/ +--- + +Use [Postman](https://www.postman.com/), a popular tool for exploring APIs, +to interact with the [InfluxDB API](/influxdb/v2.5/api-guide/). 
+
+## Install Postman
+
+Download Postman from the [official downloads page](https://www.postman.com/downloads/).
+
+Or to install with Homebrew on macOS, run the following command:
+
+```sh
+brew install --cask postman
+```
+
+## Send authenticated API requests with Postman
+
+All requests to the [InfluxDB v2 API](/influxdb/v2.5/api-guide/) must include an [InfluxDB API token](/influxdb/v2.5/security/tokens/).
+
+{{% note %}}
+
+#### Authenticate with a username and password
+
+If you need to send a username and password (`Authorization: Basic`) to the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/), see how to [authenticate with a username and password scheme](/influxdb/v2.5/reference/api/influxdb-1x/#authenticate-with-the-token-scheme).
+
+{{% /note %}}
+
+To configure Postman to send an [InfluxDB API token](/influxdb/v2.5/security/tokens/) with the `Authorization: Token` HTTP header, do the following:
+
+1. If you have not already, [create a token](/influxdb/v2.5/security/tokens/create-token/).
+2. In the Postman **Authorization** tab, select **API Key** in the **Type** dropdown.
+3. For **Key**, enter `Authorization`.
+4. For **Value**, enter `Token INFLUX_API_TOKEN`, replacing *`INFLUX_API_TOKEN`* with the token generated in step 1.
+5. Ensure that the **Add to** option is set to **Header**.
+
+#### Test authentication credentials
+
+To test the authentication, in Postman, enter your InfluxDB API `/api/v2/` root endpoint URL and click **Send**.
+
+###### InfluxDB v2 API root endpoint
+
+```sh
+http://localhost:8086/api/v2
+``` diff --git a/content/influxdb/v2.5/api-guide/tutorials/_index.md b/content/influxdb/v2.5/api-guide/tutorials/_index.md new file mode 100644 index 000000000..be3547936 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/tutorials/_index.md @@ -0,0 +1,30 @@
+---
+title: InfluxDB API client library tutorials
+seotitle: Get started with InfluxDB API client libraries
+description: Follow step-by-step tutorials for InfluxDB API client libraries in your favorite framework or language.
+weight: 4
+menu:
+  influxdb_2_5:
+    name: Client library tutorials
+    parent: Develop with the API
+influxdb/v2.5/tags: [api]
+---
+
+Follow step-by-step tutorials to build an Internet-of-Things (IoT) application with InfluxData client libraries and your favorite framework or language.
+InfluxData and the user community maintain client libraries for developers who want to take advantage of:
+
+- Idioms for InfluxDB requests, responses, and errors.
+- Common patterns in a familiar programming language.
+- Faster development and less boilerplate code.
+
+In these tutorials, you'll use the InfluxDB API and
+client libraries to build a modern application, and learn the following:
+
+- InfluxDB core concepts.
+- How the application interacts with devices and InfluxDB.
+- How to authenticate apps and devices to the API.
+- How to install a client library.
+- How to write and query data in InfluxDB.
+- How to use the InfluxData UI libraries to format data and create visualizations.
+ +{{< children >}} diff --git a/content/influxdb/v2.5/api-guide/tutorials/nodejs.md b/content/influxdb/v2.5/api-guide/tutorials/nodejs.md new file mode 100644 index 000000000..8cf489c26 --- /dev/null +++ b/content/influxdb/v2.5/api-guide/tutorials/nodejs.md @@ -0,0 +1,521 @@ +--- +title: JavaScript client library starter +seotitle: Use JavaScript client library to build a sample application +list_title: JavaScript +description: > + Build a JavaScript application that writes, queries, and manages devices with the + InfluxDB client library. +menu: + influxdb_2_5: + identifier: client-library-starter-js + name: JavaScript + parent: Client library tutorials +influxdb/v2.5/tags: [api, javascript, nodejs] +--- + +{{% api/iot-starter-intro %}} + +## Contents + +- [Contents](#contents) +- [Set up InfluxDB](#set-up-influxdb) + - [Authenticate with an InfluxDB API token](#authenticate-with-an-influxdb-api-token) +- [Introducing IoT Starter](#introducing-iot-starter) +- [Create the application](#create-the-application) +- [Install InfluxDB client library](#install-influxdb-client-library) +- [Configure the client library](#configure-the-client-library) +- [Build the API](#build-the-api) +- [Create the API to list devices](#create-the-api-to-list-devices) + - [Handle requests for device information](#handle-requests-for-device-information) + - [Retrieve and list devices](#retrieve-and-list-devices) +- [Create the API to register devices](#create-the-api-to-register-devices) + - [Create an authorization for the device](#create-an-authorization-for-the-device) + - [Write the device authorization to a bucket](#write-the-device-authorization-to-a-bucket) +- [Install and run the UI](#install-and-run-the-ui) + +## Set up InfluxDB + +If you haven't already, [create an InfluxDB Cloud account](https://www.influxdata.com/products/influxdb-cloud/) or [install InfluxDB OSS](https://www.influxdata.com/products/influxdb/). + +### Authenticate with an InfluxDB API token + +For convenience in development, +[create an _All-Access_ token](/influxdb/v2.5/security/tokens/create-token/) +for your application. This grants your application full read and write +permissions on all resources within your InfluxDB organization. + +{{% note %}} + +For a production application, create and use a +{{% cloud-only %}}custom{{% /cloud-only %}}{{% oss-only %}}read-write{{% /oss-only %}} +token with minimal permissions and only use it with your application. + +{{% /note %}} + +## Introducing IoT Starter + +The application architecture has four layers: + +- **InfluxDB API**: InfluxDB v2 API. +- **IoT device**: Virtual or physical devices write IoT data to the InfluxDB API. +- **UI**: Sends requests to the server and renders views in the browser. +- **API**: Receives requests from the UI, sends requests to InfluxDB, and processes responses from InfluxDB. + +{{% note %}} +For the complete code referenced in this tutorial, see the [influxdata/iot-api-js repository](https://github.com/influxdata/iot-api-js). +{{% /note %}} + +## Install Yarn + +If you haven't already installed `yarn`, follow the [Yarn package manager installation instructions](https://yarnpkg.com/getting-started/install#nodejs-1610-1) for your version of Node.js. + +- To check the installed `yarn` version, enter the following code into your terminal: + + ```bash + yarn --version + ``` + +## Create the application + +Create a directory that will contain your `iot-api` projects. 
+The following example code creates an `iot-api` directory in your home directory +and changes to the new directory: + +```bash +mkdir ~/iot-api-apps +cd ~/iot-api-apps +``` + +Follow these steps to create a JavaScript application with [Next.js](https://nextjs.org/): + +1. In your `~/iot-api-apps` directory, open a terminal and enter the following commands to create the `iot-api-js` app from the NextJS [learn-starter template](https://github.com/vercel/next-learn/tree/master/basics/learn-starter): + + ```bash + yarn create-next-app iot-api-js --example "https://github.com/vercel/next-learn/tree/master/basics/learn-starter" + ``` + +2. After the installation completes, enter the following commands in your terminal to go into your `./iot-api-js` directory and start the development server: + + ```bash + cd iot-api-js + yarn dev -p 3001 + ``` + +To view the application, visit in your browser. + +## Install InfluxDB client library + +The InfluxDB client library provides the following InfluxDB API interactions: + +- Query data with the Flux language. +- Write data to InfluxDB. +- Batch data in the background. +- Retry requests automatically on failure. + +1. Enter the following command into your terminal to install the client library: + + ```bash + yarn add @influxdata/influxdb-client + ``` + +2. Enter the following command into your terminal to install `@influxdata/influxdb-client-apis`, the _management APIs_ that create, modify, and delete authorizations, buckets, tasks, and other InfluxDB resources: + + ```bash + yarn add @influxdata/influxdb-client-apis + ``` + +For more information about the client library, see the [influxdata/influxdb-client-js repo](https://github.com/influxdata/influxdb-client-js). + +## Configure the client library + +InfluxDB client libraries require configuration properties from your InfluxDB environment. +Typically, you'll provide the following properties as environment variables for your application: + +- `INFLUX_URL` +- `INFLUX_TOKEN` +- `INFLUX_ORG` +- `INFLUX_BUCKET` +- `INFLUX_BUCKET_AUTH` + +Next.js uses the `env` module to provide environment variables to your application. + +The `./.env.development` file is versioned and contains non-secret default settings for your _development_ environment. + +```bash +# .env.development + +INFLUX_URL=http://localhost:8086 +INFLUX_BUCKET=iot_center +INFLUX_BUCKET_AUTH=iot_center_devices +``` + +To configure secrets and settings that aren't added to version control, +create a `./.env.local` file and set the variables--for example, set your InfluxDB token and organization: + +```sh +# .env.local + +# INFLUX_TOKEN +# InfluxDB API token used by the application server to send requests to InfluxDB. +# For convenience in development, use an **All-Access** token. + +INFLUX_TOKEN=29Xx1KH9VkASPR2DSfRfFd82OwGD... + +# INFLUX_ORG +# InfluxDB organization ID you want to use in development. + +INFLUX_ORG=48c88459ee424a04 +``` + +Enter the following commands into your terminal to restart and load the `.env` files: + + 1. `CONTROL+C` to stop the application. + 2. `yarn dev` to start the application. + +Next.js sets variables that you can access in the `process.env` object--for example: + +```ts +console.log(process.env.INFLUX_ORG) +``` + +## Build the API + +Your application API provides server-side HTTP endpoints that process requests from the UI. +Each API endpoint is responsible for the following: + +1. Listen for HTTP requests (from the UI). +2. Translate requests into InfluxDB API requests. +3. 
Process InfluxDB API responses and handle errors. +4. Respond with status and data (for the UI). + +## Create the API to list devices + +Add the `/api/devices` API endpoint that retrieves, processes, and lists devices. +`/api/devices` uses the `/api/v2/query` InfluxDB API endpoint to query `INFLUX_BUCKET_AUTH` for a registered device. + +### Handle requests for device information + +1. Create a `./pages/api/devices/[[...deviceParams]].js` file to handle requests for `/api/devices` and `/api/devices//measurements/`. + +2. In the file, export a Next.js request `handler` function. +[See the example](https://github.com/influxdata/iot-api-js/blob/18d34bcd59b93ad545c5cd9311164c77f6d1995a/pages/api/devices/%5B%5B...deviceParams%5D%5D.js). + + {{% note %}} +In Next.js, the filename pattern `[[...param]].js` creates a _catch-all_ API route. +To learn more, see [Next.js dynamic API routes](https://nextjs.org/docs/api-routes/dynamic-api-routes). + {{% /note %}} + +### Retrieve and list devices + +Retrieve registered devices in `INFLUX_BUCKET_AUTH` and process the query results. + +1. Create a Flux query that gets the last row of each [series](/influxdb/v2.5/reference/glossary#series) that contains a `deviceauth` measurement. + The example query below returns rows that contain the `key` field (authorization ID) and excludes rows that contain a `token` field (to avoid exposing tokens to the UI). + + ```js + // Flux query finds devices + from(bucket:`${INFLUX_BUCKET_AUTH}`) + |> range(start: 0) + |> filter(fn: (r) => r._measurement == "deviceauth" and r._field != "token") + |> last() + ``` + +2. Use the `QueryApi` client to send the Flux query to the `POST /api/v2/query` InfluxDB API endpoint. + +Create a `./pages/api/devices/_devices.js` file that contains the following: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% truncate %}} + +```ts +import { InfluxDB } from '@influxdata/influxdb-client' +import { flux } from '@influxdata/influxdb-client' + +const INFLUX_ORG = process.env.INFLUX_ORG +const INFLUX_BUCKET_AUTH = process.env.INFLUX_BUCKET_AUTH +const influxdb = new InfluxDB({url: process.env.INFLUX_URL, token: process.env.INFLUX_TOKEN}) + +/** + * Gets devices or a particular device when deviceId is specified. Tokens + * are not returned unless deviceId is specified. It can also return devices + * with empty/unknown key, such devices can be ignored (InfluxDB authorization is not associated). + * @param deviceId optional deviceId + * @returns promise with an Record. + */ + export async function getDevices(deviceId) { + const queryApi = influxdb.getQueryApi(INFLUX_ORG) + const deviceFilter = + deviceId !== undefined + ? 
flux` and r.deviceId == "${deviceId}"` + : flux` and r._field != "token"` + const fluxQuery = flux`from(bucket:${INFLUX_BUCKET_AUTH}) + |> range(start: 0) + |> filter(fn: (r) => r._measurement == "deviceauth"${deviceFilter}) + |> last()` + const devices = {} + + return await new Promise((resolve, reject) => { + queryApi.queryRows(fluxQuery, { + next(row, tableMeta) { + const o = tableMeta.toObject(row) + const deviceId = o.deviceId + if (!deviceId) { + return + } + const device = devices[deviceId] || (devices[deviceId] = {deviceId}) + device[o._field] = o._value + if (!device.updatedAt || device.updatedAt < o._time) { + device.updatedAt = o._time + } + }, + error: reject, + complete() { + resolve(devices) + }, + }) + }) +} +``` + +{{% /truncate %}} + +{{% caption %}}[iot-api-js/pages/api/devices/_devices.js getDevices(deviceId)](https://github.com/influxdata/iot-api-js/blob/18d34bcd59b93ad545c5cd9311164c77f6d1995a/pages/api/devices/_devices.js){{% /caption %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +The `_devices` module exports a `getDevices(deviceId)` function that queries +for registered devices, processes the data, and returns a Promise with the result. +If you invoke the function as `getDevices()` (without a _`deviceId`_), +it retrieves all `deviceauth` points and returns a Promise with `{ DEVICE_ID: ROW_DATA }`. + +To send the query and process results, the `getDevices(deviceId)` function uses the `QueryAPI queryRows(query, consumer)` method. +`queryRows` executes the `query` and provides the Annotated CSV result as an Observable to the `consumer`. +`queryRows` has the following TypeScript signature: + +```ts +queryRows( + query: string | ParameterizedQuery, + consumer: FluxResultObserver +): void +``` + +{{% caption %}}[@influxdata/influxdb-client-js QueryAPI](https://github.com/influxdata/influxdb-client-js/blob/3db2942432b993048d152e0d0e8ec8499eedfa60/packages/core/src/QueryApi.ts){{% /caption %}} + +The `consumer` that you provide must implement the [`FluxResultObserver` interface](https://github.com/influxdata/influxdb-client-js/blob/3db2942432b993048d152e0d0e8ec8499eedfa60/packages/core/src/results/FluxResultObserver.ts) and provide the following callback functions: + +- `next(row, tableMeta)`: processes the next row and table metadata--for example, to prepare the response. +- `error(error)`: receives and handles errors--for example, by rejecting the Promise. +- `complete()`: signals when all rows have been consumed--for example, by resolving the Promise. + +To learn more about Observers, see the [RxJS Guide](https://rxjs.dev/guide/observer). + +## Create the API to register devices + +In this application, a _registered device_ is a point that contains your device ID, authorization ID, and API token. +The API token and authorization permissions allow the device to query and write to `INFLUX_BUCKET`. +In this section, you add the API endpoint that handles requests from the UI, creates an authorization in InfluxDB, +and writes the registered device to the `INFLUX_BUCKET_AUTH` bucket. +To learn more about API tokens and authorizations, see [Manage API tokens](/influxdb/v2.5/security/tokens/) + +The application API uses the following `/api/v2` InfluxDB API endpoints: + +- `POST /api/v2/query`: to query `INFLUX_BUCKET_AUTH` for a registered device. +- `GET /api/v2/buckets`: to get the bucket ID for `INFLUX_BUCKET`. +- `POST /api/v2/authorizations`: to create an authorization for the device. 
+- `POST /api/v2/write`: to write the device authorization to `INFLUX_BUCKET_AUTH`. + +1. Add a `./pages/api/devices/create.js` file to handle requests for `/api/devices/create`. +2. In the file, export a Next.js request `handler` function that does the following: + + 1. Accept a device ID in the request body. + 2. Query `INFLUX_BUCKET_AUTH` and respond with an error if an authorization exists for the device. + 3. [Create an authorization for the device](#create-an-authorization-for-the-device). + 4. [Write the device ID and authorization to `INFLUX_BUCKET_AUTH`](#write-the-device-authorization-to-a-bucket). + 5. Respond with `HTTP 200` when the write request completes. + +[See the example](https://github.com/influxdata/iot-api-js/blob/25b38c94a1f04ea71f2ef4b9fcba5350d691cb9d/pages/api/devices/create.js). + +### Create an authorization for the device + +In this section, you create an authorization with _read_-_write_ permission to `INFLUX_BUCKET` and receive an API token for the device. +The example below uses the following steps to create the authorization: + +1. Instantiate the `AuthorizationsAPI` client and `BucketsAPI` client with the configuration. +2. Retrieve the bucket ID. +3. Use the client library to send a `POST` request to the `/api/v2/authorizations` InfluxDB API endpoint. + +In `./api/devices/create.js`, add the following `createAuthorization(deviceId)` function: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% truncate %}} + +```js +import { InfluxDB } from '@influxdata/influxdb-client' +import { getDevices } from './_devices' +import { AuthorizationsAPI, BucketsAPI } from '@influxdata/influxdb-client-apis' +import { Point } from '@influxdata/influxdb-client' + +const INFLUX_ORG = process.env.INFLUX_ORG +const INFLUX_BUCKET_AUTH = process.env.INFLUX_BUCKET_AUTH +const INFLUX_BUCKET = process.env.INFLUX_BUCKET + +const influxdb = new InfluxDB({url: process.env.INFLUX_URL, token: process.env.INFLUX_TOKEN}) + +/** + * Creates an authorization for a supplied deviceId + * @param {string} deviceId client identifier + * @returns {import('@influxdata/influxdb-client-apis').Authorization} promise with authorization or an error + */ +async function createAuthorization(deviceId) { + const authorizationsAPI = new AuthorizationsAPI(influxdb) + const bucketsAPI = new BucketsAPI(influxdb) + const DESC_PREFIX = 'IoTCenterDevice: ' + + const buckets = await bucketsAPI.getBuckets({name: INFLUX_BUCKET, orgID: INFLUX_ORG}) + const bucketId = buckets.buckets[0]?.id + + return await authorizationsAPI.postAuthorizations( + { + body: { + orgID: INFLUX_ORG, + description: DESC_PREFIX + deviceId, + permissions: [ + { + action: 'read', + resource: {type: 'buckets', id: bucketId, orgID: INFLUX_ORG}, + }, + { + action: 'write', + resource: {type: 'buckets', id: bucketId, orgID: INFLUX_ORG}, + }, + ], + }, + } + ) + +} +``` + +{{% /truncate %}} +{{% caption %}}[iot-api-js/pages/api/devices/create.js](https://github.com/influxdata/iot-api-js/blob/42a37d683b5e4df601422f85d2c22f5e9d592e68/pages/api/devices/create.js){{% /caption %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +To create an authorization that has _read_-_write_ permission to `INFLUX_BUCKET`, you need the bucket ID. +To retrieve the bucket ID, +`createAuthorization(deviceId)` calls the `BucketsAPI getBuckets` function that sends a `GET` request to +the `/api/v2/buckets` InfluxDB API endpoint. 
+`createAuthorization(deviceId)` then passes a new authorization in the request body with the following: + +- Bucket ID. +- Organization ID. +- Description: `IoTCenterDevice: DEVICE_ID`. +- List of permissions to the bucket. + +To learn more about API tokens and authorizations, see [Manage API tokens](/influxdb/v2.5/security/tokens/). + +Next, [write the device authorization to a bucket](#write-the-device-authorization-to-a-bucket). + +### Write the device authorization to a bucket + +With a device authorization in InfluxDB, write a point for the device and authorization details to `INFLUX_BUCKET_AUTH`. +Storing the device authorization in a bucket allows you to do the following: + +- Report device authorization history. +- Manage devices with and without tokens. +- Assign the same token to multiple devices. +- Refresh tokens. + +To write a point to InfluxDB, use the InfluxDB client library to send a `POST` request to the `/api/v2/write` InfluxDB API endpoint. +In `./pages/api/devices/create.js`, add the following `createDevice(deviceId)` function: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} + +```ts +/** Creates an authorization for a deviceId and writes it to a bucket */ +async function createDevice(deviceId) { + let device = (await getDevices(deviceId)) || {} + let authorizationValid = !!Object.values(device)[0]?.key + if(authorizationValid) { + console.log(JSON.stringify(device)) + return Promise.reject('This device ID is already registered and has an authorization.') + } else { + console.log(`createDeviceAuthorization: deviceId=${deviceId}`) + const authorization = await createAuthorization(deviceId) + const writeApi = influxdb.getWriteApi(INFLUX_ORG, INFLUX_BUCKET_AUTH, 'ms', { + batchSize: 2, + }) + const point = new Point('deviceauth') + .tag('deviceId', deviceId) + .stringField('key', authorization.id) + .stringField('token', authorization.token) + writeApi.writePoint(point) + await writeApi.close() + return + } +} +``` + +{{% caption %}}[iot-api-js/pages/api/devices/create.js](https://github.com/influxdata/iot-api-js/blob/25b38c94a1f04ea71f2ef4b9fcba5350d691cb9d/pages/api/devices/create.js){{% /caption %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +`createDevice(device_id)` takes a _`device_id`_ and writes data to `INFLUX_BUCKET_AUTH` in the following steps: + +1. Initialize `InfluxDBClient()` with `url`, `token`, and `org` values from the configuration. +2. Initialize a `WriteAPI` client for writing data to an InfluxDB bucket. +3. Create a `Point`. +4. Use `writeApi.writePoint(point)` to write the `Point` to the bucket. + +The function writes a point with the following elements: + +| Element | Name | Value | +|:------------|:-----------|:--------------------------| +| measurement | | `deviceauth` | +| tag | `deviceId` | device ID | +| field | `key` | authorization ID | +| field | `token` | authorization (API) token | + +## Install and run the UI + +`influxdata/iot-api-ui` is a standalone [Next.js React](https://nextjs.org/docs/basic-features/pages) UI that uses your application API to write and query data in InfluxDB. +`iot-api-ui` uses Next.js _[rewrites](https://nextjs.org/docs/api-reference/next.config.js/rewrites)_ to route all requests in the `/api/` path to your API. + +To install and run the UI, do the following: + +1. 
In your `~/iot-api-apps` directory, clone the [`influxdata/iot-api-ui` repo](https://github.com/influxdata/iot-api-ui) and go into the `iot-api-ui` directory--for example:

+
+    ```bash
+    cd ~/iot-api-apps
+    git clone git@github.com:influxdata/iot-api-ui.git
+    cd ./iot-api-ui
+    ```
+
+2. The `./.env.development` file contains default configuration settings that you can
+    edit or override (with a `./.env.local` file).
+3. To start the UI, enter the following command into your terminal:
+
+    ```bash
+    yarn dev
+    ```
+
+    To view the list and register devices, open the UI in your browser.
+
+To learn more about the UI components, see [`influxdata/iot-api-ui`](https://github.com/influxdata/iot-api-ui). diff --git a/content/influxdb/v2.5/api-guide/tutorials/python.md b/content/influxdb/v2.5/api-guide/tutorials/python.md new file mode 100644 index 000000000..f99f1d9ff --- /dev/null +++ b/content/influxdb/v2.5/api-guide/tutorials/python.md @@ -0,0 +1,583 @@
+---
+title: Python client library starter
+seotitle: Use Python client library to build a sample application
+list_title: Python
+description: >
+  Build an application that writes, queries, and manages devices with the InfluxDB
+  client library for Python.
+weight: 3
+menu:
+  influxdb_2_5:
+    identifier: client-library-starter-py
+    name: Python
+    parent: Client library tutorials
+influxdb/v2.5/tags: [api, python]
+---
+
+{{% api/iot-starter-intro %}}
+- How to use the InfluxData UI libraries to format data and create visualizations.
+
+## Contents
+
+- [Contents](#contents)
+- [Set up InfluxDB](#set-up-influxdb)
+  - [Authenticate with an InfluxDB API token](#authenticate-with-an-influxdb-api-token)
+- [Introducing IoT Starter](#introducing-iot-starter)
+- [Create the application](#create-the-application)
+- [Install InfluxDB client library](#install-influxdb-client-library)
+- [Configure the client library](#configure-the-client-library)
+- [Build the API](#build-the-api)
+- [Create the API to register devices](#create-the-api-to-register-devices)
+  - [Create an authorization for the device](#create-an-authorization-for-the-device)
+  - [Write the device authorization to a bucket](#write-the-device-authorization-to-a-bucket)
+- [Create the API to list devices](#create-the-api-to-list-devices)
+- [Create IoT virtual device](#create-iot-virtual-device)
+- [Write telemetry data](#write-telemetry-data)
+- [Query telemetry data](#query-telemetry-data)
+- [Define API responses](#define-api-responses)
+- [Install and run the UI](#install-and-run-the-ui)
+
+## Set up InfluxDB
+
+If you haven't already, [create an InfluxDB Cloud account](https://www.influxdata.com/products/influxdb-cloud/) or [install InfluxDB OSS](https://www.influxdata.com/products/influxdb/).
+
+### Authenticate with an InfluxDB API token
+
+For convenience in development,
+[create an _All-Access_ token](/influxdb/v2.5/security/tokens/create-token/)
+for your application. This grants your application full read and write
+permissions on all resources within your InfluxDB organization.
+
+{{% note %}}
+
+For a production application, create and use a
+{{% cloud-only %}}custom{{% /cloud-only %}}{{% oss-only %}}read-write{{% /oss-only %}}
+token with minimal permissions and only use it with your application.
+
+{{% /note %}}
+
+## Introducing IoT Starter
+
+The application architecture has four layers:
+
+- **InfluxDB API**: InfluxDB v2 API.
+- **IoT device**: Virtual or physical devices write IoT data to the InfluxDB API.
+- **UI**: Sends requests to the server and renders views in the browser.
+- **API**: Receives requests from the UI, sends requests to InfluxDB, + and processes responses from InfluxDB. + +{{% note %}} +For the complete code referenced in this tutorial, see the [influxdata/iot-api-python repository](https://github.com/influxdata/iot-api-python). +{{% /note %}} + +## Create the application + +Create a directory that will contain your `iot-api` projects. +The following example code creates an `iot-api` directory in your home directory +and changes to the new directory: + +```bash +mkdir ~/iot-api-apps +cd ~/iot-api-apps +``` + +Use [Flask](https://flask.palletsprojects.com/), a lightweight Python web +framework, +to create your application. + +1. In your `~/iot-api-apps` directory, open a terminal and enter the following commands to create and navigate into a new project directory: + + ```bash + mkdir iot-api-python && cd $_ + ``` + +2. Enter the following commands in your terminal to create and activate a Python virtual environment for the project: + + ```bash + # Create a new virtual environment named "virtualenv" + # Python 3.8+ + python -m venv virtualenv + + # Activate the virtualenv (OS X & Linux) + source virtualenv/bin/activate + ``` + +3. After activation completes, enter the following commands in your terminal to install Flask with the `pip` package installer (included with Python): + + ```bash + pip install Flask + ``` + +4. In your project, create a `app.py` file that: + + 1. Imports the Flask package. + 2. Instantiates a Flask application. + 3. Provides a route to execute the application. + + ```python + from flask import Flask + app = Flask(__name__) + + @app.route("/") + def hello(): + return "Hello World!" + ``` + + {{% caption %}}[influxdata/iot-api-python app.py](https://github.com/influxdata/iot-api-python/blob/main/app.py){{% /caption %}} + + Start your application. + The following example code starts the application + on `http://localhost:3001` with debugging and hot-reloading enabled: + + ```bash + export FLASK_ENV=development + flask run -h localhost -p 3001 + ``` + + In your browser, visit to view the “Hello World!” response. + +## Install InfluxDB client library + +The InfluxDB client library provides the following InfluxDB API interactions: + +- Query data with the Flux language. +- Write data to InfluxDB. +- Batch data in the background. +- Retry requests automatically on failure. + +Enter the following command into your terminal to install the client library: + +```bash +pip install influxdb-client +``` + +For more information about the client library, see the [influxdata/influxdb-client-python repo](https://github.com/influxdata/influxdb-client-python). + +## Configure the client library + +InfluxDB client libraries require configuration properties from your InfluxDB environment. +Typically, you'll provide the following properties as environment variables for your application: + +- `INFLUX_URL` +- `INFLUX_TOKEN` +- `INFLUX_ORG` +- `INFLUX_BUCKET` +- `INFLUX_BUCKET_AUTH` + +To set up the client configuration, create a `config.ini` in your project's top +level directory and paste the following to provide the necessary InfluxDB credentials: + +```ini +[APP] +INFLUX_URL = +INFLUX_TOKEN = +INFLUX_ORG = +INFLUX_BUCKET = iot_center +INFLUX_BUCKET_AUTH = iot_center_devices +``` + +{{% caption %}}[/iot-api-python/config.ini](https://github.com/influxdata/iot-api-python/blob/main/config.ini){{% /caption %}} + +Replace the following: + +- **``**: your InfluxDB instance URL. 
+- **``**: your InfluxDB [API token](#authorization) with permission to query (_read_) buckets +and create (_write_) authorizations for devices. +- **``**: your InfluxDB organization ID. + +## Build the API + +Your application API provides server-side HTTP endpoints that process requests from the UI. +Each API endpoint is responsible for the following: + +1. Listen for HTTP requests (from the UI). +2. Translate requests into InfluxDB API requests. +3. Process InfluxDB API responses and handle errors. +4. Respond with status and data (for the UI). + +## Create the API to register devices + +In this application, a _registered device_ is a point that contains your device ID, authorization ID, and API token. +The API token and authorization permissions allow the device to query and write to `INFLUX_BUCKET`. +In this section, you add the API endpoint that handles requests from the UI, creates an authorization in InfluxDB, +and writes the registered device to the `INFLUX_BUCKET_AUTH` bucket. +To learn more about API tokens and authorizations, see [Manage API tokens](/influxdb/v2.5/security/tokens/) + +The application API uses the following `/api/v2` InfluxDB API endpoints: + +- `POST /api/v2/query`: to query `INFLUX_BUCKET_AUTH` for a registered device. +- `GET /api/v2/buckets`: to get the bucket ID for `INFLUX_BUCKET`. +- `POST /api/v2/authorizations`: to create an authorization for the device. +- `POST /api/v2/write`: to write the device authorization to `INFLUX_BUCKET_AUTH`. + +### Create an authorization for the device + +In this section, you create an authorization with _read_-_write_ permission to `INFLUX_BUCKET` and receive an API token for the device. +The example below uses the following steps to create the authorization: + +1. Instantiate the `AuthorizationsAPI` client and `BucketsAPI` client with the configuration. +2. Retrieve the bucket ID. +3. Use the client library to send a `POST` request to the `/api/v2/authorizations` InfluxDB API endpoint. + +Create a `./api/devices.py` file that contains the following: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Python](#python) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% truncate %}} + +```python +# Import the dependencies. +import configparser +from datetime import datetime +from uuid import uuid4 + +# Import client library classes. +from influxdb_client import Authorization, InfluxDBClient, Permission, PermissionResource, Point, WriteOptions +from influxdb_client.client.authorizations_api import AuthorizationsApi +from influxdb_client.client.bucket_api import BucketsApi +from influxdb_client.client.query_api import QueryApi +from influxdb_client.client.write_api import SYNCHRONOUS + +from api.sensor import Sensor + +# Get the configuration key-value pairs. 
+ +config = configparser.ConfigParser() +config.read('config.ini') + +def create_authorization(device_id) -> Authorization: + influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'), + token=os.environ.get('INFLUX_TOKEN'), + org=os.environ.get('INFLUX_ORG')) + + authorization_api = AuthorizationsApi(influxdb_client) + # get bucket_id from bucket + buckets_api = BucketsApi(influxdb_client) + buckets = buckets_api.find_bucket_by_name(config.get('APP', 'INFLUX_BUCKET')) # function returns only 1 bucket + bucket_id = buckets.id + org_id = buckets.org_id + desc_prefix = f'IoTCenterDevice: {device_id}' + org_resource = PermissionResource(org_id=org_id, id=bucket_id, type="buckets") + read = Permission(action="read", resource=org_resource) + write = Permission(action="write", resource=org_resource) + permissions = [read, write] + authorization = Authorization(org_id=org_id, permissions=permissions, description=desc_prefix) + request = authorization_api.create_authorization(authorization) + return request +``` + +{{% /truncate %}} +{{% caption %}}[iot-api-python/api/devices.py](https://github.com/influxdata/iot-api-python/blob/d389a0e072c7a03dfea99e5663bdc32be94966bb/api/devices.py#L145){{% /caption %}} + +To create an authorization that has _read_-_write_ permission to `INFLUX_BUCKET`, you need the bucket ID. +To retrieve the bucket ID, `create_authorization(deviceId)` calls the +`BucketsAPI find_bucket_by_name` function that sends a `GET` request to +the `/api/v2/buckets` InfluxDB API endpoint. +`create_authorization(deviceId)` then passes a new authorization in the request body with the following: + +- Bucket ID. +- Organization ID. +- Description: `IoTCenterDevice: DEVICE_ID`. +- List of permissions to the bucket. + +To learn more about API tokens and authorizations, see [Manage API tokens](/influxdb/v2.5/security/tokens/). + +Next, [write the device authorization to a bucket](#write-the-device-authorization-to-a-bucket). + +### Write the device authorization to a bucket + +With a device authorization in InfluxDB, write a point for the device and authorization details to `INFLUX_BUCKET_AUTH`. +Storing the device authorization in a bucket allows you to do the following: + +- Report device authorization history. +- Manage devices with and without tokens. +- Assign the same token to multiple devices. +- Refresh tokens. + +To write a point to InfluxDB, use the InfluxDB client library to send a `POST` request to the `/api/v2/write` InfluxDB API endpoint. 
+In `./api/devices.py`, add the following `create_device(device_id)` function: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Python](#python) +{{% /code-tabs %}} +{{% code-tab-content %}} + +```python +def create_device(device_id=None): + influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'), + token=config.get('APP', 'INFLUX_TOKEN'), + org=config.get('APP', 'INFLUX_ORG')) + if device_id is None: + device_id = str(uuid4()) + write_api = influxdb_client.write_api(write_options=SYNCHRONOUS) + point = Point('deviceauth') \ + .tag("deviceId", device_id) \ + .field('key', f'fake_auth_id_{device_id}') \ + .field('token', f'fake_auth_token_{device_id}') + client_response = write_api.write(bucket=config.get('APP', 'INFLUX_BUCKET_AUTH'), record=point) + # write() returns None on success + if client_response is None: + return device_id + # Return None on failure + return None +``` + +{{% caption %}}[iot-api-python/api/devices.py](https://github.com/influxdata/iot-api-python/blob/f354941c80b6bac643ca29efe408fde1deebdc96/api/devices.py#L47){{% /caption %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +`create_device(device_id)` takes a _`device_id`_ and writes data to `INFLUX_BUCKET_AUTH` in the following steps: + +1. Initialize `InfluxDBClient()` with `url`, `token`, and `org` values from the configuration. +2. Initialize a `WriteAPI` client for writing data to an InfluxDB bucket. +3. Create a `Point`. +4. Use `write_api.write()` to write the `Point` to the bucket. +5. Check for failures--if the write was successful, `write_api` returns `None`. +6. Return _`device_id`_ if successful; `None` otherwise. + +The function writes a point with the following elements: + +| Element | Name | Value | +|:------------|:-----------|:--------------------------| +| measurement | | `deviceauth` | +| tag | `deviceId` | device ID | +| field | `key` | authorization ID | +| field | `token` | authorization (API) token | + +Next, [create the API to list devices](#create-the-api-to-list-devices). + +## Create the API to list devices + +Add the `/api/devices` API endpoint that retrieves, processes, and lists registered devices. + +1. Create a Flux query that gets the last row of each [series](/influxdb/v2.5/reference/glossary#series) that contains a `deviceauth` measurement. + The example query below returns rows that contain the `key` field (authorization ID) and excludes rows that contain a `token` field (to avoid exposing tokens to the UI). + + ```js + // Flux query finds devices + from(bucket:`${INFLUX_BUCKET_AUTH}`) + |> range(start: 0) + |> filter(fn: (r) => r._measurement == "deviceauth" and r._field != "token") + |> last() + ``` + +2. Use the `QueryApi` client to send the Flux query to the `POST /api/v2/query` InfluxDB API endpoint. 
+ + In `./api/devices.py`, add the following: + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [Python](#python) + {{% /code-tabs %}} + {{% code-tab-content %}} + + {{% truncate %}} + + ```python + def get_device(device_id=None) -> {}: + influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'), + token=os.environ.get('INFLUX_TOKEN'), + org=os.environ.get('INFLUX_ORG')) + # Queries must be formatted with single and double quotes correctly + query_api = QueryApi(influxdb_client) + device_filter = '' + if device_id: + device_id = str(device_id) + device_filter = f'r.deviceId == "{device_id}" and r._field != "token"' + else: + device_filter = f'r._field != "token"' + + flux_query = f'from(bucket: "{config.get("APP", "INFLUX_BUCKET_AUTH")}") ' \ + f'|> range(start: 0) ' \ + f'|> filter(fn: (r) => r._measurement == "deviceauth" and {device_filter}) ' \ + f'|> last()' + + response = query_api.query(flux_query) + result = [] + for table in response: + for record in table.records: + try: + 'updatedAt' in record + except KeyError: + record['updatedAt'] = record.get_time() + record[record.get_field()] = record.get_value() + result.append(record.values) + return result + ``` + +{{% /truncate %}} + +{{% caption %}}[iot-api-python/api/devices.py get_device()](https://github.com/influxdata/iot-api-python/blob/9bf44a659424a27eb937d545dc0455754354aef5/api/devices.py#L30){{% /caption %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +The `get_device(device_id)` function does the following: + +1. Instantiates a `QueryApi` client and sends the Flux query to InfluxDB. +2. Iterates over the `FluxTable` in the response and returns a list of tuples. + +## Create IoT virtual device + +Create a `./api/sensor.py` file that generates simulated weather telemetry data. +Follow the [example code](https://github.com/influxdata/iot-api-python/blob/f354941c80b6bac643ca29efe408fde1deebdc96/api/sensor.py) to create the IoT virtual device. + +Next, generate data for virtual devices and [write the data to InfluxDB](#write-telemetry-data). + +## Write telemetry data + +In this section, you write telemetry data to an InfluxDB bucket. +To write data, use the InfluxDB client library to send a `POST` request to the `/api/v2/write` InfluxDB API endpoint. + +The example below uses the following steps to generate data and then write it to InfluxDB: + +1. Initialize a `WriteAPI` instance. +2. Create a `Point` with the `environment` measurement and data fields for temperature, humidity, pressure, latitude, and longitude. +3. Use the `WriteAPI write` method to send the point to InfluxDB. 
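+
+The `write_measurements(device_id)` function in the next step depends on the `Sensor` class from `./api/sensor.py` (see [Create IoT virtual device](#create-iot-virtual-device) above). If you haven't created that file from the linked example yet, the following minimal sketch shows the shape the code below assumes; the method names, value ranges, and use of `random` here are illustrative assumptions, and the linked repository file is the authoritative version:
+
+```python
+# api/sensor.py -- minimal sketch of a virtual weather sensor (assumed implementation).
+import random
+
+
+class Sensor:
+    """Generates simulated telemetry for a virtual IoT device."""
+
+    def generate_measurement(self):
+        # Return a random numeric reading to simulate a sensor value.
+        return round(random.uniform(0, 100), 2)
+
+    def geo(self):
+        # Return a simulated coordinate pair.
+        return {
+            'latitude': round(random.uniform(-90.0, 90.0), 6),
+            'longitude': round(random.uniform(-180.0, 180.0), 6),
+        }
+```
+
+The only behavior the write function relies on is that `generate_measurement()` returns a number and `geo()` returns a dictionary with `latitude` and `longitude` keys.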
+
+In `./api/devices.py`, add the following `write_measurements(device_id)` function:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Python](#python)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+
+```python
+def write_measurements(device_id):
+    influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
+                                     token=config.get('APP', 'INFLUX_TOKEN'),
+                                     org=config.get('APP', 'INFLUX_ORG'))
+    write_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
+    virtual_device = Sensor()
+    coord = virtual_device.geo()
+    point = Point("environment") \
+        .tag("device", device_id) \
+        .tag("TemperatureSensor", "virtual_bme280") \
+        .tag("HumiditySensor", "virtual_bme280") \
+        .tag("PressureSensor", "virtual_bme280") \
+        .field("Temperature", virtual_device.generate_measurement()) \
+        .field("Humidity", virtual_device.generate_measurement()) \
+        .field("Pressure", virtual_device.generate_measurement()) \
+        .field("Lat", coord['latitude']) \
+        .field("Lon", coord['longitude']) \
+        .time(datetime.utcnow())
+    print(f"Writing: {point.to_line_protocol()}")
+    client_response = write_api.write(bucket=config.get('APP', 'INFLUX_BUCKET'), record=point)
+    # write() returns None on success
+    if client_response is None:
+        # TODO Maybe also return the data that was written
+        return device_id
+    # Return None on failure
+    return None
+```
+
+{{% caption %}}[iot-api-python/api/devices.py write_measurements()](https://github.com/influxdata/iot-api-python/blob/f354941c80b6bac643ca29efe408fde1deebdc96/api/devices.py){{% /caption %}}
+
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+## Query telemetry data
+
+In this section, you retrieve telemetry data from an InfluxDB bucket.
+To retrieve data, use the InfluxDB client library to send a `POST` request to the `/api/v2/query` InfluxDB API endpoint.
+The example below uses the following steps to retrieve and process telemetry data:
+
+ 1. Query `environment` measurements in `INFLUX_BUCKET`.
+ 2. Filter results by `device_id`.
+ 3. Return CSV data that the [`influxdata/giraffe` UI library](https://github.com/influxdata/giraffe) can process.
+
+In `./api/devices.py`, add the following `get_measurements(query)` function, which takes a Flux query string and returns the results as annotated CSV:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Python](#python)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+
+```python
+def get_measurements(query):
+    influxdb_client = InfluxDBClient(url=config.get('APP', 'INFLUX_URL'),
+                                     token=os.environ.get('INFLUX_TOKEN'), org=os.environ.get('INFLUX_ORG'))
+    query_api = QueryApi(influxdb_client)
+    result = query_api.query_csv(query,
+                                 dialect=Dialect(
+                                     header=True,
+                                     delimiter=",",
+                                     comment_prefix="#",
+                                     annotations=['group', 'datatype', 'default'],
+                                     date_time_format="RFC3339"))
+    response = ''
+    for row in result:
+        response += (',').join(row) + ('\n')
+    return response
+```
+
+{{% caption %}}[iot-api-python/api/devices.py get_measurements()](https://github.com/influxdata/iot-api-python/blob/9bf44a659424a27eb937d545dc0455754354aef5/api/devices.py#L122){{% /caption %}}
+
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+## Define API responses
+
+In `app.py`, add API endpoints that match incoming requests and respond with the results of your modules.
+In the following `/api/devices/` route example, `app.py` retrieves _`device_id`_ from `GET` and `POST` requests, passes it to the `get_device(device_id)` method and returns the result as JSON data with CORS `allow-` headers.
+ +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Python](#python) +{{% /code-tabs %}} +{{% code-tab-content %}} + +```python +@app.route('/api/devices/', methods=['GET', 'POST']) +def api_get_device(device_id): + if request.method == "OPTIONS": # CORS preflight + return _build_cors_preflight_response() + return _corsify_actual_response(jsonify(devices.get_device(device_id))) +``` + +{{% caption %}}[iot-api-python/app.py](https://github.com/influxdata/iot-api-python/blob/9bf44a659424a27eb937d545dc0455754354aef5/app.py){{% /caption %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Enter the following commands into your terminal to restart the application: + + 1. `CONTROL+C` to stop the application. + 2. `flask run -h localhost -p 3001` to start the application. + +To retrieve devices data from your API, visit in your browser. + +## Install and run the UI + +`influxdata/iot-api-ui` is a standalone [Next.js React](https://nextjs.org/docs/basic-features/pages) UI that uses your application API to write and query data in InfluxDB. +`iot-api-ui` uses Next.js _[rewrites](https://nextjs.org/docs/api-reference/next.config.js/rewrites)_ to route all requests in the `/api/` path to your API. + +To install and run the UI, do the following: + +1. In your `~/iot-api-apps` directory, clone the [`influxdata/iot-api-ui` repo](https://github.com/influxdata/iot-api-ui) and go into the `iot-api-ui` directory--for example: + + ```bash + cd ~/iot-api-apps + git clone git@github.com:influxdata/iot-api-ui.git + cd ./iot-app-ui + ``` + +2. The `./.env.development` file contains default configuration settings that you can + edit or override (with a `./.env.local` file). +3. To start the UI, enter the following command into your terminal: + + ```bash + yarn dev + ``` + + To view the list and register devices, visit in your browser. + +To learn more about the UI components, see [`influxdata/iot-api-ui`](https://github.com/influxdata/iot-api-ui). diff --git a/content/influxdb/v2.5/backup-restore/_index.md b/content/influxdb/v2.5/backup-restore/_index.md new file mode 100644 index 000000000..2f7d8698c --- /dev/null +++ b/content/influxdb/v2.5/backup-restore/_index.md @@ -0,0 +1,17 @@ +--- +title: Back up and restore data +seotitle: Backup and restore data with InfluxDB +description: > + InfluxDB provides tools that let you back up and restore data and metadata stored + in InfluxDB. +influxdb/v2.5/tags: [backup, restore] +menu: + influxdb_2_5: + name: Back up & restore data +weight: 9 +products: [oss] +--- + +InfluxDB provides tools to back up and restore data and metadata stored in InfluxDB. + +{{< children >}} diff --git a/content/influxdb/v2.5/backup-restore/backup.md b/content/influxdb/v2.5/backup-restore/backup.md new file mode 100644 index 000000000..272aae2d1 --- /dev/null +++ b/content/influxdb/v2.5/backup-restore/backup.md @@ -0,0 +1,49 @@ +--- +title: Back up data +seotitle: Back up data in InfluxDB +description: > + Use the `influx backup` command to back up data and metadata stored in InfluxDB. +menu: + influxdb_2_5: + parent: Back up & restore data +weight: 101 +related: + - /influxdb/v2.5/backup-restore/restore/ + - /influxdb/v2.5/reference/cli/influx/backup/ +products: [oss] +--- + +Use the [`influx backup` command](/influxdb/v2.5/reference/cli/influx/backup/) to back up +data and metadata stored in InfluxDB. +InfluxDB copies all data and metadata to a set of files stored in a specified directory +on your local filesystem. 
+ +{{% note %}} +#### InfluxDB 1.x/2.x compatibility +The InfluxDB {{< current-version >}} `influx backup` command is not compatible with versions of InfluxDB prior to 2.0.0. +**For information about migrating data between InfluxDB 1.x and {{< current-version >}}, see:** + +- [Automatically upgrade from InfluxDB 1.x to {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/automatic-upgrade/) +- [Manually upgrade from InfluxDB 1.x to {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade/) +{{% /note %}} + +{{% cloud %}} +The `influx backup` command **cannot** back up data stored in **{{< cloud-name "short" >}}**. +{{% /cloud %}} + +The `influx backup` command requires: + +- The directory path for where to store the backup file set +- The **root authorization token** (the token created for the first user in the + [InfluxDB setup process](/influxdb/v2.5/get-started/)). + +##### Back up data with the influx CLI +```sh +# Syntax +influx backup -t + +# Example +influx backup \ + path/to/backup_$(date '+%Y-%m-%d_%H-%M') \ + -t xXXXX0xXX0xxX0xx_x0XxXxXXXxxXX0XXX0XXxXxX0XxxxXX0Xx0xx== +``` diff --git a/content/influxdb/v2.5/backup-restore/restore.md b/content/influxdb/v2.5/backup-restore/restore.md new file mode 100644 index 000000000..989d02ed5 --- /dev/null +++ b/content/influxdb/v2.5/backup-restore/restore.md @@ -0,0 +1,141 @@ +--- +title: Restore data +seotitle: Restore data in InfluxDB +description: > + Use the `influx restore` command to restore backup data and metadata from InfluxDB. +menu: + influxdb_2_5: + parent: Back up & restore data +weight: 101 +influxdb/v2.5/tags: [restore] +related: + - /influxdb/v2.5/backup-restore/backup/ + - /influxdb/v2.5/reference/cli/influxd/restore/ +products: [oss] +--- + +{{% cloud %}} +Restores **not supported in {{< cloud-name "short" >}}**. +{{% /cloud %}} + +Use the `influx restore` command to restore backup data and metadata from InfluxDB OSS. + +- [Restore data with the influx CLI](#restore-data-with-the-influx-cli) +- [Recover from a failed restore](#recover-from-a-failed-restore) + +InfluxDB moves existing data and metadata to a temporary location. +If the restore fails, InfluxDB preserves temporary data for recovery, +otherwise this data is deleted. +_See [Recover from a failed restore](#recover-from-a-failed-restore)._ + +{{% note %}} +#### Cannot restore to existing buckets +The `influx restore` command cannot restore data to existing buckets. +Use the `--new-bucket` flag to create a new bucket to restore data to. +To restore data and retain bucket names, [delete existing buckets](/influxdb/v2.5/organizations/buckets/delete-bucket/) +and then begin the restore process. +{{% /note %}} + +## Restore data with the influx CLI +Use the `influx restore` command and specify the path to the backup directory. 
+ +_For more information about restore options and flags, see the +[`influx restore` documentation](/influxdb/v2.5/reference/cli/influx/restore/)._ + +- [Restore all time series data](#restore-all-time-series-data) +- [Restore data from a specific bucket](#restore-data-from-a-specific-bucket) +- [Restore and replace all InfluxDB data](#restore-and-replace-all-influxdb-data) + +### Restore all time series data +To restore all time series data from a backup directory, provide the following: + +- backup directory path + +```sh +influx restore /backups/2020-01-20_12-00/ +``` + +### Restore data from a specific bucket +To restore data from a specific backup bucket, provide the following: + +- backup directory path +- bucket name or ID + +```sh +influx restore \ + /backups/2020-01-20_12-00/ \ + --bucket example-bucket + +# OR + +influx restore \ + /backups/2020-01-20_12-00/ \ + --bucket-id 000000000000 +``` + +If a bucket with the same name as the backed up bucket already exists in InfluxDB, +use the `--new-bucket` flag to create a new bucket with a different name and +restore data into it. + +```sh +influx restore \ + /backups/2020-01-20_12-00/ \ + --bucket example-bucket \ + --new-bucket new-example-bucket +``` + +### Restore and replace all InfluxDB data +To restore and replace all time series data _and_ InfluxDB key-value data such as +tokens, users, dashboards, etc., include the following: + +- `--full` flag +- backup directory path + +```sh +influx restore \ + /backups/2020-01-20_12-00/ \ + --full +``` + +{{% note %}} +#### Restore to a new InfluxDB server +If using a backup to populate a new InfluxDB server: + +1. Retrieve the [admin token](/influxdb/v2.5/security/tokens/#admin-token) from your source InfluxDB instance. +2. Set up your new InfluxDB instance, but use the `-t`, `--token` flag to use the + **admin token** from your source instance as the admin token on your new instance. + + ```sh + influx setup --token My5uP3rSecR37t0keN + ``` +3. Restore the backup to the new server. + + ```sh + influx restore \ + /backups/2020-01-20_12-00/ \ + --full + ``` + +If you do not provide the admin token from your source InfluxDB instance as the +admin token in your new instance, the restore process and all subsequent attempts +to authenticate with the new server will fail. + +1. The first restore API call uses the auto-generated token to authenticate with + the new server and overwrites the entire key-value store in the new server, including + the auto-generated token. +2. The second restore API call attempts to upload time series data, but uses the + auto-generated token to authenticate with new server. + That token is overwritten in first restore API call and the process fails to authenticate. +{{% /note %}} + + +## Recover from a failed restore +If the restoration process fails, InfluxDB preserves existing data in a `tmp` +directory in the [target engine path](/influxdb/v2.5/reference/cli/influx/restore/#flags) +(default is `~/.influxdbv2/engine`). + +To recover from a failed restore: + +1. Copy the temporary files back into the `engine` directory. +2. Remove the `.tmp` extensions from each of the copied files. +3. Restart the `influxd` server. diff --git a/content/influxdb/v2.5/get-started.md b/content/influxdb/v2.5/get-started.md new file mode 100644 index 000000000..f08cdd97c --- /dev/null +++ b/content/influxdb/v2.5/get-started.md @@ -0,0 +1,71 @@ +--- +title: Get started with InfluxDB +description: > + Start collecting, processing, and visualizing data in InfluxDB OSS. 
+menu:
+  influxdb_2_5:
+    name: Get started
+weight: 3
+influxdb/v2.5/tags: [get-started]
+aliases:
+  - /influxdb/v2.5/introduction/get-started/
+---
+
+After you've [installed InfluxDB OSS](/influxdb/v2.5/install/), you're ready to get started. Explore the following ways to work with your data:
+
+- [Collect and write data](#collect-and-write-data)
+- [Query data](#query-data)
+- [Process data](#process-data)
+- [Visualize data](#visualize-data)
+- [Monitor and alert](#monitor-and-alert)
+
+**Note:** To run InfluxDB, start the `influxd` daemon ([InfluxDB service](/influxdb/v2.5/reference/cli/influxd/)) from the command line. Once you've started the `influxd` daemon, use `localhost:8086` to log in to your InfluxDB instance, or use the [`influx` command line interface](/influxdb/v2.5/reference/cli/influx/) to interact with it.
+
+To start InfluxDB, do the following:
+ 1. Open a terminal.
+ 2. Type `influxd` in the command line.
+
+```sh
+influxd
+```
+
+### Collect and write data
+
+Collect and write data to InfluxDB using the Telegraf plugins, the InfluxDB v2 API, the `influx` command line interface (CLI), the InfluxDB UI (the user interface for InfluxDB 2.5), or the InfluxDB v2 API client libraries.
+
+#### Use Telegraf
+
+Use Telegraf to quickly write data to {{< cloud-name >}}.
+Create new Telegraf configurations automatically in the InfluxDB UI, or manually update an existing Telegraf configuration to send data to your {{< cloud-name "short" >}} instance.
+
+For details, see [Automatically configure Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/)
+and [Manually update Telegraf configurations](/influxdb/v2.5/write-data/no-code/use-telegraf/manual-config/).
+
+#### Scrape data
+
+**InfluxDB OSS** lets you scrape Prometheus-formatted metrics from HTTP endpoints. For details, see [Scrape data](/influxdb/v2.5/write-data/no-code/scrape-data/).
+
+#### API, CLI, and client libraries
+
+For information about using the InfluxDB v2 API, `influx` CLI, and client libraries to write data, see [Write data to InfluxDB](/influxdb/v2.5/write-data/).
+
+### Query data
+
+Query data using Flux, the UI, and the `influx` command line interface.
+See [Query data](/influxdb/v2.5/query-data/).
+
+### Process data
+
+Use InfluxDB tasks to process and downsample data. See [Process data](/influxdb/v2.5/process-data/).
+
+### Visualize data
+
+Build custom dashboards to visualize your data.
+See [Visualize data](/influxdb/v2.5/visualize-data/).
+
+### Monitor and alert
+
+Monitor your data and send alerts based on specified logic.
+See [Monitor and alert](/influxdb/v2.5/monitor-alert/).
+
+{{< influxdbu "influxdb-101" >}}
diff --git a/content/influxdb/v2.5/influxdb-templates/_index.md b/content/influxdb/v2.5/influxdb-templates/_index.md
new file mode 100644
index 000000000..8396e8399
--- /dev/null
+++ b/content/influxdb/v2.5/influxdb-templates/_index.md
@@ -0,0 +1,98 @@
+---
+title: InfluxDB templates
+description: >
+  InfluxDB templates are prepackaged InfluxDB configurations that contain everything
+  from dashboards and Telegraf configurations to notifications and alerts.
+menu: influxdb_2_5
+weight: 10
+influxdb/v2.5/tags: [templates]
+---
+
+InfluxDB templates are prepackaged InfluxDB configurations that contain everything
+from dashboards and Telegraf configurations to notifications and alerts.
+Use templates to monitor your technology stack,
+set up a fresh instance of InfluxDB, back up your dashboard configuration, or
+[share your configuration](https://github.com/influxdata/community-templates/) with the InfluxData community.
+ +**InfluxDB templates do the following:** + +- Reduce setup time by giving you resources that are already configured for your use-case. +- Facilitate secure, portable, and source-controlled InfluxDB resource states. +- Simplify sharing and using pre-built InfluxDB solutions. + +{{< youtube 2JjW4Rym9XE >}} + +View InfluxDB community templates + +## Template manifests + +A template **manifest** is a file that defines +InfluxDB [resources](#template-resources). +Template manifests support the following formats: + +- [YAML](https://yaml.org/) +- [JSON](https://www.json.org/) +- [Jsonnet](https://jsonnet.org/) + +{{% note %}} +Template manifests are compatible with +[Kubernetes Custom Resource Definitions (CRD)](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/). +{{% /note %}} + +The `metadata.name` field in manifests uniquely identifies each resource in the template. +`metadata.name` values must be [DNS-1123](https://tools.ietf.org/html/rfc1123) compliant. +The `spec` object contains the resource configuration. + +#### Example + +```yaml +# bucket-template.yml +# Template manifest that defines two buckets. +apiVersion: influxdata.com/v2alpha1 +kind: Bucket +metadata: + name: thirsty-shaw-91b005 +spec: + description: My IoT Center Bucket + name: iot-center + retentionRules: + - everySeconds: 86400 + type: expire +--- +apiVersion: influxdata.com/v2alpha1 +kind: Bucket +metadata: + name: upbeat-fermat-91b001 +spec: + name: air_sensor +--- +``` + +_See [Create an InfluxDB template](/influxdb/v2.5/influxdb-templates/create/) for information about +generating template manifests._ + +### Template resources + +Templates may contain the following InfluxDB resources: + +- [buckets](/influxdb/v2.5/organizations/buckets/create-bucket/) +- [checks](/influxdb/v2.5/monitor-alert/checks/create/) +- [dashboards](/influxdb/v2.5/visualize-data/dashboards/create-dashboard/) +- [dashboard variables](/influxdb/v2.5/visualize-data/variables/create-variable/) +- [labels](/influxdb/v2.5/visualize-data/labels/) +- [notification endpoints](/influxdb/v2.5/monitor-alert/notification-endpoints/create/) +- [notification rules](/influxdb/v2.5/monitor-alert/notification-rules/create/) +- [tasks](/influxdb/v2.5/process-data/manage-tasks/create-task/) +- [Telegraf configurations](/influxdb/v2.5/write-data/no-code/use-telegraf/) + +## Stacks + +Use **InfluxDB stacks** to manage InfluxDB templates. +When you apply a template, InfluxDB associates resources in the template with a stack. +Use stacks to add, update, or remove InfluxDB templates over time. + +For more information, see [InfluxDB stacks](#influxdb-stacks) below. + +--- + +{{< children >}} diff --git a/content/influxdb/v2.5/influxdb-templates/create.md b/content/influxdb/v2.5/influxdb-templates/create.md new file mode 100644 index 000000000..05fa58671 --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/create.md @@ -0,0 +1,291 @@ +--- +title: Create an InfluxDB template +description: > + Use the InfluxDB UI and the `influx export` command to create InfluxDB templates. 
+menu: + influxdb_2_5: + parent: InfluxDB templates + name: Create a template + identifier: Create an InfluxDB template +weight: 103 +influxdb/v2.5/tags: [templates] +related: + - /influxdb/v2.5/reference/cli/influx/export/ + - /influxdb/v2.5/reference/cli/influx/export/all/ +--- + +Use the InfluxDB user interface (UI) and the [`influx export` command](/influxdb/v2.5/reference/cli/influx/export/) to +create InfluxDB templates from [resources](/influxdb/v2.5/influxdb-templates/#template-resources) in an organization. +Add buckets, Telegraf configurations, tasks, and more in the InfluxDB +UI and then export those resources as a template. + +{{< youtube 714uHkxKM6U >}} + +- [Create a template](#create-a-template) +- [Export resources to a template](#export-resources-to-a-template) +- [Include user-definable resource names](#include-user-definable-resource-names) +- [Troubleshoot template results and permissions](#troubleshoot-template-results-and-permissions) +- [Share your InfluxDB templates](#share-your-influxdb-templates) + +## Create a template + +Creating a new organization to contain only your template resources is an easy way +to ensure you export the resources you want. +Follow these steps to create a template from a new organization. + +1. [Start InfluxDB](/influxdb/v2.5/get-started/). +2. [Create a new organization](/influxdb/v2.5/organizations/create-org/). +3. In the InfluxDB UI, add one or more [resources](/influxdb/v2.5/influxdb-templates/#template-resources). +4. [Create an **All-Access** API token](/influxdb/v2.5/security/tokens/create-token/) (or a token that has **read** access to the organization). +5. Use the API token from **Step 4** with the [`influx export all` subcommand](/influxdb/v2.5/reference/cli/influx/export/all/) to [export all resources]() in the organization to a template file. + + ```sh + influx export all \ + -o YOUR_INFLUX_ORG \ + -t YOUR_ALL_ACCESS_TOKEN \ + -f ~/templates/template.yml + ``` + +## Export resources to a template + +The [`influx export` command](/influxdb/v2.5/reference/cli/influx/export/) and subcommands let you +export [resources](#template-resources) from an organization to a template manifest. +Your [API token](/influxdb/v2.5/security/tokens/) must have **read** access to resources that you want to export. + +If you want to export resources that depend on other resources, be sure to export the dependencies. + +{{< cli/influx-creds-note >}} + +To create a template that **adds, modifies, and deletes resources** when applied to an organization, use [InfluxDB stacks](/influxdb/v2.5/influxdb-templates/stacks/). +First, [initialize the stack](/influxdb/v2.5/influxdb-templates/stacks/init/) +and then [export the stack](#export-a-stack). + +To create a template that only **adds resources** when applied to an organization (and doesn't modify existing resources there), choose one of the following: +- [Export all resources](#export-all-resources) to export all resources or a filtered + subset of resources to a template. +- [Export specific resources](#export-specific-resources) by name or ID to a template. + +### Export all resources + +To export all [resources](/influxdb/v2.5/influxdb-templates/#template-resources) +within an organization to a template manifest file, use the +[`influx export all` subcommand](/influxdb/v2.5/reference/cli/influx/export/all/) +with the `--file` (`-f`) option. + +Provide the following: + +- **Destination path and filename** for the template manifest. 
+ The filename extension determines the output format: + - `your-template.yml`: [YAML](https://yaml.org/) format + - `your-template.json`: [JSON](https://json.org/) format + +```sh +# Syntax +influx export all -f +``` + +#### Export resources filtered by labelName or resourceKind + +The [`influx export all` subcommand](/influxdb/v2.5/reference/cli/influx/export/all/) +accepts a `--filter` option that exports +only resources that match specified label names or resource kinds. +To filter on label name *and* resource kind, provide a `--filter` for each. + +#### Export only dashboards and buckets with specific labels + +The following example exports resources that match this predicate logic: + +```js +(resourceKind == "Bucket" or resourceKind == "Dashboard") +and +(labelName == "Example1" or labelName == "Example2") +``` + +```sh +influx export all \ + -f ~/templates/template.yml \ + --filter=resourceKind=Bucket \ + --filter=resourceKind=Dashboard \ + --filter=labelName=Example1 \ + --filter=labelName=Example2 +``` + +For more options and examples, see the +[`influx export all` subcommand](/influxdb/v2.5/reference/cli/influx/export/all/). + +### Export specific resources + +To export specific [resources](/influxdb/v2.5/influxdb-templates/#template-resources) by name or ID, use the **[`influx export` command](/influxdb/v2.5/reference/cli/influx/export/)** with one or more lists of resources to include. + +Provide the following: + +- **Destination path and filename** for the template manifest. + The filename extension determines the output format: + - `your-template.yml`: [YAML](https://yaml.org/) format + - `your-template.json`: [JSON](https://json.org/) format +- **Resource options** with corresponding lists of resource IDs or resource names to include in the template. + For information about what resource options are available, see the + [`influx export` command](/influxdb/v2.5/reference/cli/influx/export/). + +```sh +# Syntax +influx export -f [resource-flags] +``` + +#### Export specific resources by ID +```sh +influx export \ + --org-id ed32b47572a0137b \ + -f ~/templates/template.yml \ + -t $INFLUX_TOKEN \ + --buckets=00x000ooo0xx0xx,o0xx0xx00x000oo \ + --dashboards=00000xX0x0X00x000 \ + --telegraf-configs=00000x0x000X0x0X0 +``` + +#### Export specific resources by name +```sh +influx export \ + --org-id ed32b47572a0137b \ + -f ~/templates/template.yml \ + --bucket-names=bucket1,bucket2 \ + --dashboard-names=dashboard1,dashboard2 \ + --telegraf-config-names=telegrafconfig1,telegrafconfig2 +``` + +### Export a stack + +To export an InfluxDB [stack](/influxdb/v2.5/influxdb-templates/stacks/) and all its associated resources as a template, use the +`influx export stack` command. +Provide the following: + +- **Organization name** or **ID** +- **API token** with read access to the organization +- **Destination path and filename** for the template manifest. + The filename extension determines the output format: + - `your-template.yml`: [YAML](https://yaml.org/) format + - `your-template.json`: [JSON](https://json.org/) format +- **Stack ID** + +#### Export a stack as a template + +```sh +# Syntax +influx export stack \ + -o \ + -t \ + -f \ + + +# Example +influx export stack \ + -o my-org \ + -t mYSuP3RS3CreTt0K3n + -f ~/templates/awesome-template.yml \ + 05dbb791a4324000 +``` + +## Include user-definable resource names + +After exporting a template manifest, replace resource names with **environment references** +to let users customize resource names when installing your template. + +1. 
[Export a template](#export-a-template). +2. Select any of the following resource fields to update: + + - `metadata.name` + - `associations[].name` + - `endpointName` _(unique to `NotificationRule` resources)_ + +3. Replace the resource field value with an `envRef` object with a `key` property + that references the key of a key-value pair the user provides when installing the template. + During installation, the `envRef` object is replaced by the value of the + referenced key-value pair. + If the user does not provide the environment reference key-value pair, InfluxDB + uses the `key` string as the default value. + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} +[YAML](#) +[JSON](#) + {{% /code-tabs %}} + {{% code-tab-content %}} +```yml +apiVersion: influxdata.com/v2alpha1 +kind: Bucket +metadata: + name: + envRef: + key: bucket-name-1 +``` + {{% /code-tab-content %}} + {{% code-tab-content %}} +```json +{ + "apiVersion": "influxdata.com/v2alpha1", + "kind": "Bucket", + "metadata": { + "name": { + "envRef": { + "key": "bucket-name-1" + } + } + } +} +``` + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +Using the example above, users are prompted to provide a value for `bucket-name-1` +when [applying the template](/influxdb/v2.5/influxdb-templates/use/#apply-templates). +Users can also include the `--env-ref` flag with the appropriate key-value pair +when installing the template. + +```sh +# Set bucket-name-1 to "myBucket" +influx apply \ + -f /path/to/template.yml \ + --env-ref=bucket-name-1=myBucket +``` + +_If sharing your template, we recommend documenting what environment references +exist in the template and what keys to use to replace them._ + +{{% note %}} +#### Resource fields that support environment references + +Only the following fields support environment references: + +- `metadata.name` +- `spec.endpointName` +- `spec.associations.name` +{{% /note %}} + +## Troubleshoot template results and permissions + +If you get unexpected results, missing resources, or errors when exporting +templates, check the following: +- [Ensure `read` access](#ensure-read-access) +- [Use Organization ID](#use-organization-id) +- [Check for resource dependencies](#check-for-resource-dependencies) + +### Ensure read access + +The [API token](/influxdb/v2.5/security/tokens/) must have **read** access to resources that you want to export. The `influx export all` command only exports resources that the API token can read. For example, to export all resources in an organization that has ID `abc123`, the API token must have the `read:/orgs/abc123` permission. + +To learn more about permissions, see [how to view authorizations](/influxdb/v2.5/security/tokens/view-tokens/) and [how to create a token](/influxdb/v2.5/security/tokens/create-token/) with specific permissions. + +### Use Organization ID + +If your token doesn't have **read** access to the organization and you want to [export specific resources](#export-specific-resources), use the `--org-id ` flag (instead of `-o ` or `--org `) to provide the organization. + +### Check for resource dependencies + +If you want to export resources that depend on other resources, be sure to export the dependencies as well. Otherwise, the resources may not be usable. + +## Share your InfluxDB templates + +Share your InfluxDB templates with the entire InfluxData community. +Contribute your template to the [InfluxDB Community Templates](https://github.com/influxdata/community-templates/) repository on GitHub. 
+ +View InfluxDB Community Templates diff --git a/content/influxdb/v2.5/influxdb-templates/stacks/_index.md b/content/influxdb/v2.5/influxdb-templates/stacks/_index.md new file mode 100644 index 000000000..bd156a5eb --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/stacks/_index.md @@ -0,0 +1,26 @@ +--- +title: InfluxDB stacks +description: > + Use an InfluxDB stack to manage your InfluxDB templates—add, update, or remove templates over time. +menu: + influxdb_2_5: + parent: InfluxDB templates +weight: 105 +related: + - /influxdb/v2.5/reference/cli/influx/pkg/stack/ +--- + +Use InfluxDB stacks to manage [InfluxDB templates](/influxdb/v2.5/influxdb-templates). +When you apply a template, InfluxDB associates resources in the template with a stack. Use the stack to add, update, or remove InfluxDB templates over time. + + {{< children type="anchored-list" >}} + + {{< children readmore=true >}} + +{{% note %}} +**Key differences between stacks and templates**: + +- A template defines a set of resources in a text file outside of InfluxDB. When you apply a template, a stack is automatically created to manage the applied template. +- Stacks add, modify or delete resources in an instance. +- Templates do not recognize resources in an instance. All resources in the template are added, creating duplicate resources if a resource already exists. + {{% /note %}} diff --git a/content/influxdb/v2.5/influxdb-templates/stacks/init.md b/content/influxdb/v2.5/influxdb-templates/stacks/init.md new file mode 100644 index 000000000..6afb12e88 --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/stacks/init.md @@ -0,0 +1,73 @@ +--- +title: Initialize an InfluxDB stack +list_title: Initialize a stack +description: > + InfluxDB automatically creates a new stack each time you [apply an InfluxDB template](/influxdb/v2.5/influxdb-templates/use/) + **without providing a stack ID**. + To manually create or initialize a new stack, use the [`influx stacks init` command](/influxdb/v2.5/reference/cli/influx/stacks/init/). +menu: + influxdb_2_5: + parent: InfluxDB stacks + name: Initialize a stack +weight: 202 +related: + - /influxdb/v2.5/reference/cli/influx/stacks/init/ +list_code_example: | + ```sh + influx apply \ + -o example-org \ + -f path/to/template.yml + ``` + ```sh + influx stacks init \ + -o example-org \ + -n "Example Stack" \ + -d "InfluxDB stack for monitoring some awesome stuff" \ + -u https://example.com/template-1.yml \ + -u https://example.com/template-2.yml + ``` +--- + +InfluxDB automatically creates a new stack each time you [apply an InfluxDB template](/influxdb/v2.5/influxdb-templates/use/) +**without providing a stack ID**. +To manually create or initialize a new stack, use the [`influx stacks init` command](/influxdb/v2.5/reference/cli/influx/stacks/init/). + +## Initialize a stack when applying a template +To automatically create a new stack when [applying an InfluxDB template](/influxdb/v2.5/influxdb-templates/use/) +**don't provide a stack ID**. +InfluxDB applies the resources in the template to a new stack and provides the **stack ID** the output. + +```sh +influx apply \ + -o example-org \ + -f path/to/template.yml +``` + +## Manually initialize a new stack +Use the [`influx stacks init` command](/influxdb/v2.5/reference/cli/influx/stacks/init/) +to create or initialize a new InfluxDB stack. 
+ +**Provide the following:** + +- Organization name or ID +- Stack name +- Stack description +- InfluxDB template URLs + + +```sh +# Syntax +influx stacks init \ + -o \ + -n \ + -d + +# Example +influx stacks init \ + -o example-org \ + -n "Example Stack" \ + -d "InfluxDB stack for monitoring some awesome stuff" \ + -u https://example.com/template-1.yml \ + -u https://example.com/template-2.yml +``` diff --git a/content/influxdb/v2.5/influxdb-templates/stacks/remove.md b/content/influxdb/v2.5/influxdb-templates/stacks/remove.md new file mode 100644 index 000000000..85783e974 --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/stacks/remove.md @@ -0,0 +1,39 @@ +--- +title: Remove an InfluxDB stack +list_title: Remove a stack +description: > + Use the [`influx stacks remove` command](/influxdb/v2.5/reference/cli/influx/stacks/remove/) + to remove an InfluxDB stack and all its associated resources. +menu: + influxdb_2_5: + parent: InfluxDB stacks + name: Remove a stack +weight: 205 +related: + - /influxdb/v2.5/reference/cli/influx/stacks/remove/ +list_code_example: | + ```sh + influx stacks remove \ + -o example-org \ + --stack-id=12ab34cd56ef + ``` +--- + +Use the [`influx stacks remove` command](/influxdb/v2.5/reference/cli/influx/stacks/remove/) +to remove an InfluxDB stack and all its associated resources. + +**Provide the following:** + +- Organization name or ID +- Stack ID + + +```sh +# Syntax +influx stacks remove -o --stack-id= + +# Example +influx stacks remove \ + -o example-org \ + --stack-id=12ab34cd56ef +``` diff --git a/content/influxdb/v2.5/influxdb-templates/stacks/save-time.md b/content/influxdb/v2.5/influxdb-templates/stacks/save-time.md new file mode 100644 index 000000000..f52f4c733 --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/stacks/save-time.md @@ -0,0 +1,165 @@ +--- +title: Save time with InfluxDB stacks +list_title: Save time with stacks +description: > + Discover how to use InfluxDB stacks to save time. +menu: + influxdb_2_5: + parent: InfluxDB stacks + name: Save time with stacks +weight: 201 +related: + - /influxdb/v2.5/reference/cli/influx/stacks/ + +--- + +Save time and money using InfluxDB stacks. Here's a few ideal use cases: + +- [Automate deployments with GitOps and stacks](#automate-deployments-with-gitops-and-stacks) +- [Apply updates from source-controlled templates](#apply-updates-from-source-controlled-templates) +- [Apply template updates across multiple InfluxDB instances](#apply-template-updates-across-multiple-influxdb-instances) +- [Develop templates](#develop-templates) + +### Automate deployments with GitOps and stacks + +GitOps is popular way to configure and automate deployments. Use InfluxDB stacks in a GitOps workflow +to automatically update distributed instances of InfluxDB OSS or InfluxDB Cloud. + +To automate an InfluxDB deployment with GitOps and stacks, complete the following steps: + +1. [Set up a GitHub repository](#set-up-a-github-repository) +2. [Add existing resources to the GitHub repository](#add-existing-resources-to-the-github-repository) +3. [Automate the creation of a stack for each folder](#automate-the-creation-of-a-stack-for-each-folder) +4. [Set up Github Actions or CircleCI](#set-up-github-actions-or-circleci) + +#### Set up a GitHub repository + +Set up a GitHub repository to back your InfluxDB instance. Determine how you want to organize the resources in your stacks within your Github repository. For example, organize resources under folders for specific teams or functions. 
+ +We recommend storing all resources for one stack in the same folder. For example, if you monitor Redis, create a `redis` stack and put your Redis monitoring resources (a Telegraf configuration, four dashboards, a label, and two alert checks) into one Redis folder, each resource in a separate file. Then, when you need to update a Redis resource, it's easy to find and make changes in one location. + + {{% note %}} + Typically, we **do not recommend** using the same resource in multiple stacks. If your organization uses the same resource in multiple stacks, before you delete a stack, verify the stack does not include resources that another stack depends on. Stacks with buckets often contain data used by many different templates. Because of this, we recommend keeping buckets separate from the other stacks. + {{% /note %}} + +#### Add existing resources to the GitHub repository + +Skip this section if you are starting from scratch or don’t have existing resources you want to add to your stack. + +Use the `influx export` command to quickly export resources. Keep all your resources in a single file or have files for each one. You can always split or combine them later. + +For example, if you export resources for three stacks: `buckets`, `redis`, and `mysql`, your folder structure might look something like this when you are done: + + ```sh + influxdb-assets/ + ├── buckets/ + │ ├── telegraf_bucket.yml + ├── redis/ + │ ├── redis_overview_dashboard.yml + │ ├── redis_label.yml + │ ├── redis_cpu_check.yml + │ └── redis_mem_check.yml + ├── mysql/ + │ ├── mysql_assets.yml + └── README.md + + ``` + {{% note %}} + When you export a resource, InfluxDB creates a `meta.name` for that resource. These resource names should be unique inside your InfluxDB instance. Use a good naming convention to prevent duplicate `meta.names`. Changing the `meta.name` of the InfluxDB resource will cause the stack to orphan the resource with the previous name and create a new resource with the updated name. + {{% /note %}} + +Add the exported resources to your new GitHub repository. + +#### Automate the creation of a stack for each folder + +To automatically create a stack from each folder in your GitHub repository, create a shell script to check for an existing stack and if the stack isn't found, use the `influx stacks init` command to create a new stack. The following sample script creates a `redis` stack and automatically applies those changes to your instance: + +```sh +echo "Checking for existing redis stack..." +REDIS_STACK_ID=$(influx stacks --stack-name redis --json | jq -r '.[0].ID') +if [ "$REDIS_STACK_ID" == "null" ]; then + echo "No stack found. Initializing our stack..." + REDIS_STACK_ID=$(influx stacks init -n redis --json | jq -r '.ID') +fi + +# Setting the base path +BASE_PATH="$(pwd)" + +echo "Applying our redis stack..." +cat $BASE_PATH/redis/*.yml | \ +influx apply --force true --stack-id $REDIS_STACK_ID -q +``` + + {{% note %}} + The `--json` flag in the InfluxDB CLI is very useful when scripting against the CLI. This flag lets you grab important information easily using [`jq`](https://stedolan.github.io/jq/manual/v1.6/). + {{% /note %}} + +Repeat this step for each of the stacks in your repository. When a resource in your stack changes, re-run this script to apply updated resources to your InfluxDB instance. Re-applying a stack with an updated resource won't add, delete, or duplicate resources. 
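+
+To apply every stack in the repository in one pass instead of maintaining a separate script per stack, you could wrap the same pattern in a loop. The following is only a sketch; it assumes each top-level folder (for example, `buckets`, `redis`, and `mysql`) is named after its stack:
+
+```sh
+# Set the base path once.
+BASE_PATH="$(pwd)"
+
+for STACK_NAME in buckets redis mysql; do
+  echo "Checking for existing $STACK_NAME stack..."
+  STACK_ID=$(influx stacks --stack-name $STACK_NAME --json | jq -r '.[0].ID')
+  if [ "$STACK_ID" == "null" ]; then
+    echo "No stack found. Initializing the $STACK_NAME stack..."
+    STACK_ID=$(influx stacks init -n $STACK_NAME --json | jq -r '.ID')
+  fi
+
+  echo "Applying the $STACK_NAME stack..."
+  cat $BASE_PATH/$STACK_NAME/*.yml | \
+  influx apply --force true --stack-id $STACK_ID -q
+done
+```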
+
+#### Set up Github Actions or CircleCI
+
+Once you have a script that applies changes to your local instance, automate the deployment to other environments as needed. Use the InfluxDB CLI to maintain multiple [configuration profiles](/influxdb/v2.5/reference/cli/influx/config/) to easily switch profiles and issue commands against other InfluxDB instances. To apply the same script to a different InfluxDB instance, change your active configuration profile using the `influx config set` command, or set the desired profile dynamically using the `-c, --active-config` flag.
+
+ {{% note %}}
+ Before you run automation scripts against shared environments, we recommend manually running the steps in your script.
+ {{% /note %}}
+
+Verify that your deployment automation software lets you run a custom script, and then set up the custom script you've built locally in another environment. For example, here's a custom GitHub Action that automates deployment:
+
+```yml
+name: deploy-influxdb-resources
+
+on:
+  push:
+    branches: [ master ]
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+      with:
+        ref: ${{ github.ref }}
+    - name: Deploys repo to cloud
+      env:
+        # These secrets can be configured in the Github repo to connect to
+        # your InfluxDB instance.
+        INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
+        INFLUX_ORG: ${{ secrets.INFLUX_ORG }}
+        INFLUX_URL: ${{ secrets.INFLUX_URL }}
+        GITHUB_REPO: ${{ github.repository }}
+        GITHUB_BRANCH: ${{ github.ref }}
+      run: |
+        cd /tmp
+        wget https://dl.influxdata.com/platform/nightlies/influx_nightly_linux_amd64.tar.gz
+        tar xvfz influx_nightly_linux_amd64.tar.gz
+        sudo cp influx_nightly_linux_amd64/influx /usr/local/bin/
+        cd $GITHUB_WORKSPACE
+        # This runs the script to set up your stacks
+        chmod +x ./setup.sh
+        ./setup.sh prod
+```
+
+For more information about using GitHub Actions in your project, check out the complete [GitHub Actions documentation](https://github.com/features/actions).
+
+### Apply updates from source-controlled templates
+
+You can use a variety of InfluxDB templates from many different sources including
+[Community Templates](https://github.com/influxdata/community-templates/) or
+self-built custom templates.
+As templates are updated over time, stacks let you gracefully
+apply updates without creating duplicate resources.
+
+### Apply template updates across multiple InfluxDB instances
+
+In many cases, you may have more than one instance of InfluxDB running and want to apply
+the same template to each separate instance.
+Using stacks, you can make changes to a stack on one instance,
+[export the stack as a template](/influxdb/v2.5/influxdb-templates/create/#export-a-stack)
+and then apply the changes to your other InfluxDB instances.
+
+### Develop templates
+
+InfluxDB stacks aid in developing and maintaining InfluxDB templates.
+Stacks let you modify and update template manifests and apply those changes in
+any stack that uses the template.
diff --git a/content/influxdb/v2.5/influxdb-templates/stacks/update.md b/content/influxdb/v2.5/influxdb-templates/stacks/update.md
new file mode 100644
index 000000000..c288ab36d
--- /dev/null
+++ b/content/influxdb/v2.5/influxdb-templates/stacks/update.md
@@ -0,0 +1,56 @@
+---
+title: Update an InfluxDB stack
+list_title: Update a stack
+description: >
+  Use the [`influx apply` command](/influxdb/v2.5/reference/cli/influx/apply/)
+  to update a stack with a modified template.
+  When applying a template to an existing stack, InfluxDB checks to see if the
+  resources in the template match existing resources.
+ InfluxDB updates, adds, and removes resources to resolve differences between + the current state of the stack and the newly applied template. +menu: + influxdb_2_5: + parent: InfluxDB stacks + name: Update a stack +weight: 203 +related: + - /influxdb/v2.5/reference/cli/influx/apply + - /influxdb/v2.5/reference/cli/influx/stacks/update/ +list_code_example: | + ```sh + influx apply \ + -o example-org \ + -u http://example.com/template-1.yml \ + -u http://example.com/template-2.yml \ + --stack-id=12ab34cd56ef + ``` +--- + +Use the [`influx apply` command](/influxdb/v2.5/reference/cli/influx/apply/) +to update a stack with a modified template. +When applying a template to an existing stack, InfluxDB checks to see if the +resources in the template match existing resources. +InfluxDB updates, adds, and removes resources to resolve differences between +the current state of the stack and the newly applied template. + +Each stack is uniquely identified by a **stack ID**. +For information about retrieving your stack ID, see [View stacks](/influxdb/v2.5/influxdb-templates/stacks/view/). + +**Provide the following:** + +- Organization name or ID +- Stack ID +- InfluxDB template URLs to apply + + +```sh +influx apply \ + -o example-org \ + -u http://example.com/template-1.yml \ + -u http://example.com/template-2.yml \ + --stack-id=12ab34cd56ef +``` + +Template resources are uniquely identified by their `metadata.name` field. +If errors occur when applying changes to a stack, all applied changes are +reversed and the stack is returned to its previous state. diff --git a/content/influxdb/v2.5/influxdb-templates/stacks/view.md b/content/influxdb/v2.5/influxdb-templates/stacks/view.md new file mode 100644 index 000000000..599022a6b --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/stacks/view.md @@ -0,0 +1,69 @@ +--- +title: View InfluxDB stacks +list_title: View stacks +description: > + Use the [`influx stacks` command](/influxdb/v2.5/reference/cli/influx/stacks/) + to view installed InfluxDB stacks and their associated resources. +menu: + influxdb_2_5: + parent: InfluxDB stacks + name: View stacks +weight: 204 +related: + - /influxdb/v2.5/reference/cli/influx/stacks/ +list_code_example: | + ```sh + influx stacks -o example-org + ``` +--- + +Use the [`influx stacks` command](/influxdb/v2.5/reference/cli/influx/stacks/) +to view installed InfluxDB stacks and their associated resources. + +**Provide the following:** + +- Organization name or ID + + +```sh +# Syntax +influx stacks -o + +# Example +influx stacks -o example-org +``` + +### Filter stacks + +To output information about specific stacks, use the `--stack-name` or `--stack-id` +flags to filter output by stack names or stack IDs. + +##### Filter by stack name + +```sh +# Syntax +influx stacks \ + -o \ + --stack-name= + +# Example +influx stacks \ + -o example-org \ + --stack-name=stack1 \ + --stack-name=stack2 +``` + +### Filter by stack ID + +```sh +# Syntax +influx stacks \ + -o \ + --stack-id= + +# Example +influx stacks \ + -o example-org \ + --stack-id=12ab34cd56ef \ + --stack-id=78gh910i11jk +``` diff --git a/content/influxdb/v2.5/influxdb-templates/use.md b/content/influxdb/v2.5/influxdb-templates/use.md new file mode 100644 index 000000000..e98f028e7 --- /dev/null +++ b/content/influxdb/v2.5/influxdb-templates/use.md @@ -0,0 +1,241 @@ +--- +title: Use InfluxDB templates +description: > + Use the `influx` command line interface (CLI) to summarize, validate, and apply + templates from your local filesystem and from URLs. 
+menu: + influxdb_2_5: + parent: InfluxDB templates + name: Use templates +weight: 102 +influxdb/v2.5/tags: [templates] +related: + - /influxdb/v2.5/reference/cli/influx/apply/ + - /influxdb/v2.5/reference/cli/influx/template/ + - /influxdb/v2.5/reference/cli/influx/template/validate/ +--- + +Use the `influx` command line interface (CLI) to summarize, validate, and apply +templates from your local filesystem and from URLs. + +- [Use InfluxDB community templates](#use-influxdb-community-templates) +- [View a template summary](#view-a-template-summary) +- [Validate a template](#validate-a-template) +- [Apply templates](#apply-templates) + + +## Use InfluxDB community templates +The [InfluxDB community templates repository](https://github.com/influxdata/community-templates/) +is home to a growing number of InfluxDB templates developed and maintained by +others in the InfluxData community. +Apply community templates directly from GitHub using a template's download URL +or download the template. + +{{< youtube 2JjW4Rym9XE >}} + +{{% note %}} +When attempting to access the community templates via the URL, the templates use the following +as the root of the URL: + +```sh +https://raw.githubusercontent.com/influxdata/community-templates/master/ +``` + +For example, the Docker community template can be accessed via: + +```sh +https://raw.githubusercontent.com/influxdata/community-templates/master/docker/docker.yml +``` +{{% /note %}} + +View InfluxDB Community Templates + +## View a template summary +To view a summary of what's included in a template before applying the template, +use the [`influx template` command](/influxdb/v2.5/reference/cli/influx/template/). +View a summary of a template stored in your local filesystem or from a URL. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[From a file](#) +[From a URL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +# Syntax +influx template -f + +# Example +influx template -f /path/to/template.yml +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +# Syntax +influx template -u + +# Example +influx template -u https://raw.githubusercontent.com/influxdata/community-templates/master/linux_system/linux_system.yml +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +## Validate a template +To validate a template before you install it or troubleshoot a template, use +the [`influx template validate` command](/influxdb/v2.5/reference/cli/influx/template/validate/). +Validate a template stored in your local filesystem or from a URL. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[From a file](#) +[From a URL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +# Syntax +influx template validate -f + +# Example +influx template validate -f /path/to/template.yml +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +# Syntax +influx template validate -u + +# Example +influx template validate -u https://raw.githubusercontent.com/influxdata/community-templates/master/linux_system/linux_system.yml +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +## Apply templates +Use the [`influx apply` command](/influxdb/v2.5/reference/cli/influx/apply/) to install templates +from your local filesystem or from URLs. 
+ +- [Apply a template from a file](#apply-a-template-from-a-file) +- [Apply all templates in a directory](#apply-all-templates-in-a-directory) +- [Apply a template from a URL](#apply-a-template-from-a-url) +- [Apply templates from both files and URLs](#apply-templates-from-both-files-and-urls) +- [Define environment references](#define-environment-references) +- [Include a secret when installing a template](#include-a-secret-when-installing-a-template) + +{{% note %}} +#### Apply templates to an existing stack +To apply a template to an existing stack, include the stack ID when applying the template. +Any time you apply a template without a stack ID, InfluxDB initializes a new stack +and all new resources. +For more information, see [InfluxDB stacks](/influxdb/v2.5/influxdb-templates/stacks/). +{{% /note %}} + +### Apply a template from a file +To install templates stored on your local machine, use the `-f` or `--file` flag +to provide the **file path** of the template manifest. + +```sh +# Syntax +influx apply -o -f + +# Examples +# Apply a single template +influx apply -o example-org -f /path/to/template.yml + +# Apply multiple templates +influx apply -o example-org \ + -f /path/to/this/template.yml \ + -f /path/to/that/template.yml +``` + +### Apply all templates in a directory +To apply all templates in a directory, use the `-f` or `--file` flag to provide +the **directory path** of the directory where template manifests are stored. +By default, this only applies templates stored in the specified directory. +To apply all templates stored in the specified directory and its subdirectories, +include the `-R`, `--recurse` flag. + +```sh +# Syntax +influx apply -o -f + +# Examples +# Apply all templates in a directory +influx apply -o example-org -f /path/to/template/dir/ + +# Apply all templates in a directory and its subdirectories +influx apply -o example-org -f /path/to/template/dir/ -R +``` + +### Apply a template from a URL +To apply templates from a URL, use the `-u` or `--template-url` flag to provide the URL +of the template manifest. + +```sh +# Syntax +influx apply -o -u + +# Examples +# Apply a single template from a URL +influx apply -o example-org -u https://example.com/templates/template.yml + +# Apply multiple templates from URLs +influx apply -o example-org \ + -u https://example.com/templates/template1.yml \ + -u https://example.com/templates/template2.yml +``` + +### Apply templates from both files and URLs +To apply templates from both files and URLs in a single command, include multiple +file or directory paths and URLs, each with the appropriate `-f` or `-u` flag. + +```sh +# Syntax +influx apply -o -u -f + +# Example +influx apply -o example-org \ + -u https://example.com/templates/template1.yml \ + -u https://example.com/templates/template2.yml \ + -f ~/templates/custom-template.yml \ + -f ~/templates/iot/home/ \ + --recurse +``` + +### Define environment references +Some templates include [environment references](/influxdb/v2.5/influxdb-templates/create/#include-user-definable-resource-names) that let you provide custom resource names. +The `influx apply` command prompts you to provide a value for each environment +reference in the template. +You can also provide values for environment references by including an `--env-ref` +flag with a key-value pair comprised of the environment reference key and the +value to replace it. 
+
+```sh
+influx apply -o example-org -f /path/to/template.yml \
+  --env-ref=bucket-name-1=myBucket \
+  --env-ref=label-name-1=Label1 \
+  --env-ref=label-name-2=Label2
+```
+
+### Include a secret when installing a template
+Some templates use [secrets](/influxdb/v2.5/security/secrets/) in queries.
+Secret values are not included in templates.
+To define secret values when installing a template, include the `--secret` flag
+with the secret key-value pair.
+
+```sh
+# Syntax
+influx apply -o -f \
+  --secret==
+
+# Examples
+# Define a single secret when applying a template
+influx apply -o example-org -f /path/to/template.yml \
+  --secret=FOO=BAR
+
+# Define multiple secrets when applying a template
+influx apply -o example-org -f /path/to/template.yml \
+  --secret=FOO=bar \
+  --secret=BAZ=quz
+```
+
+_To add a secret after applying a template, see [Add secrets](/influxdb/v2.5/security/secrets/manage-secrets/add/)._
diff --git a/content/influxdb/v2.5/install.md b/content/influxdb/v2.5/install.md
new file mode 100644
index 000000000..5c439ab7d
--- /dev/null
+++ b/content/influxdb/v2.5/install.md
@@ -0,0 +1,859 @@
+---
+title: Install InfluxDB
+description: Download, install, and set up InfluxDB OSS.
+menu: influxdb_2_5
+weight: 2
+influxdb/v2.5/tags: [install]
+related:
+- /influxdb/v2.5/reference/cli/influx/auth/
+- /influxdb/v2.5/reference/cli/influx/config/
+- /influxdb/v2.5/reference/cli/influx/
+- /influxdb/v2.5/security/tokens/
+---
+
+The InfluxDB {{< current-version >}} time series platform is purpose-built to collect, store,
+process and visualize metrics and events.
+Download, install, and set up InfluxDB OSS.
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[macOS](#)
+[Linux](#)
+[Windows](#)
+[Docker](#)
+[Kubernetes](#)
+[Raspberry Pi](#)
+{{% /tabs %}}
+
+
+{{% tab-content %}}
+## Install InfluxDB v{{< current-version >}}
+
+Do one of the following:
+
+- [Use Homebrew](#use-homebrew)
+- [Manually download and install](#manually-download-and-install)
+
+{{% note %}}
+#### InfluxDB and the influx CLI are separate packages
+
+The InfluxDB server ([`influxd`](/influxdb/v2.5/reference/cli/influxd/)) and the
+[`influx` CLI](/influxdb/v2.5/reference/cli/influx/) are packaged and
+versioned separately.
+For information about installing the `influx` CLI, see
+[Install and use the influx CLI](/influxdb/v2.5/tools/influx-cli/).
+{{% /note %}}
+
+### Use Homebrew
+
+We recommend using [Homebrew](https://brew.sh/) to install InfluxDB v{{< current-version >}} on macOS:
+
+```sh
+brew update
+brew install influxdb
+```
+
+{{% note %}}
+Homebrew also installs `influxdb-cli` as a dependency.
+For information about using the `influx` CLI, see the
+[`influx` CLI reference documentation](/influxdb/v2.5/reference/cli/influx/).
+{{% /note %}}
+
+### Manually download and install
+
+To download the InfluxDB v{{< current-version >}} binaries for macOS directly,
+do the following:
+
+1. **Download the InfluxDB package.**
+
+    InfluxDB v{{< current-version >}} (macOS)
+
+
+2. **Unpackage the InfluxDB binary.**
+
+    Do one of the following:
+
+    - Double-click the downloaded package file in **Finder**.
+    - Run the following command in a macOS command prompt application such as
+      **Terminal** or **[iTerm2](https://www.iterm2.com/)**:
+
+      ```sh
+      # Unpackage contents to the current working directory
+      tar zxvf ~/Downloads/influxdb2-{{< latest-patch >}}-darwin-amd64.tar.gz
+      ```
+
+3. 
**(Optional) Place the binary in your `$PATH`** + + ```sh + # (Optional) Copy the influxd binary to your $PATH + sudo cp influxdb2-{{< latest-patch >}}-darwin-amd64/influxd /usr/local/bin/ + ``` + + If you do not move the `influxd` binary into your `$PATH`, prefix the executable + `./` to run it in place. + +{{< expand-wrapper >}} +{{% expand "Recommended – Set appropriate directory permissions" %}} + +To prevent unwanted access to data, we recommend setting the permissions on the influxdb `data-dir` to not be world readable. For server installs, it is also recommended to set a umask of 0027 to properly permission all newly created files. + +Example: + +```shell +> chmod 0750 ~/.influxdbv2 +``` + +{{% /expand %}} +{{% expand "Recommended – Verify the authenticity of downloaded binary" %}} + +For added security, use `gpg` to verify the signature of your download. +(Most operating systems include the `gpg` command by default. +If `gpg` is not available, see the [GnuPG homepage](https://gnupg.org/download/) for installation instructions.) + +1. Download and import InfluxData's public key: + + ``` + curl -s https://repos.influxdata.com/influxdb2.key | gpg --import - + ``` + +2. Download the signature file for the release by adding `.asc` to the download URL. +For example: + + ``` + wget https://dl.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-darwin-amd64.tar.gz.asc + ``` + +3. Verify the signature with `gpg --verify`: + + ``` + gpg --verify influxdb2-{{< latest-patch >}}-darwin-amd64.tar.gz.asc influxdb2-{{< latest-patch >}}-darwin-amd64.tar.gz + ``` + + The output from this command should include the following: + + ``` + gpg: Good signature from "InfluxData " [unknown] + ``` +{{% /expand %}} +{{< /expand-wrapper >}} + +{{% note %}} +Both InfluxDB 1.x and 2.x have associated `influxd` and `influx` binaries. +If InfluxDB 1.x binaries are already in your `$PATH`, run the {{< current-version >}} binaries in place +or rename them before putting them in your `$PATH`. +If you rename the binaries, all references to `influxd` and `influx` in this documentation refer to your renamed binaries. +{{% /note %}} + +#### Networking ports + +By default, InfluxDB uses TCP port `8086` for client-server communication over +the [InfluxDB HTTP API](/influxdb/v2.5/reference/api/). + +### Start and configure InfluxDB + +To start InfluxDB, run the `influxd` daemon: + +```bash +influxd +``` + +{{% note %}} +#### Run InfluxDB on macOS Catalina + +macOS Catalina requires downloaded binaries to be signed by registered Apple developers. +Currently, when you first attempt to run `influxd`, macOS will prevent it from running. +To manually authorize the `influxd` binary: + +1. Attempt to run `influxd`. +2. Open **System Preferences** and click **Security & Privacy**. +3. Under the **General** tab, there is a message about `influxd` being blocked. + Click **Open Anyway**. + +We are in the process of updating our build process to ensure released binaries are signed by InfluxData. +{{% /note %}} + +{{% warn %}} +#### "too many open files" errors + +After running `influxd`, you might see an error in the log output like the +following: + +```sh +too many open files +``` + +To resolve this error, follow the +[recommended steps](https://unix.stackexchange.com/a/221988/471569) to increase +file and process limits for your operating system version then restart `influxd`. 
+ +{{% /warn %}} + +To configure InfluxDB, see [InfluxDB configuration options](/influxdb/v2.5/reference/config-options/), and the [`influxd` documentation](/influxdb/v2.5/reference/cli/influxd) for information about +available flags and options._ + +{{% note %}} +#### InfluxDB "phone home" + +By default, InfluxDB sends telemetry data back to InfluxData. +The [InfluxData telemetry](https://www.influxdata.com/telemetry) page provides +information about what data is collected and how it is used. + +To opt-out of sending telemetry data back to InfluxData, include the +`--reporting-disabled` flag when starting `influxd`. + +```bash +influxd --reporting-disabled +``` +{{% /note %}} + +{{% /tab-content %}} + + + +{{% tab-content %}} +## Download and install InfluxDB v{{< current-version >}} + +Do one of the following: + +- [Install InfluxDB as a service with systemd](#install-influxdb-as-a-service-with-systemd) +- [Manually download and install the influxd binary](#manually-download-and-install-the-influxd-binary) + +{{% note %}} +#### InfluxDB and the influx CLI are separate packages + +The InfluxDB server ([`influxd`](/influxdb/v2.5/reference/cli/influxd/)) and the +[`influx` CLI](/influxdb/v2.5/reference/cli/influx/) are packaged and +versioned separately. +For information about installing the `influx` CLI, see +[Install and use the influx CLI](/influxdb/v2.5/tools/influx-cli/). +{{% /note %}} + +### Install InfluxDB as a service with systemd + +1. Download and install the appropriate `.deb` or `.rpm` file using a URL from the + [InfluxData downloads page](https://portal.influxdata.com/downloads/) + with the following commands: + + ```sh + # Ubuntu/Debian + wget https://dl.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-xxx.deb + sudo dpkg -i influxdb2-{{< latest-patch >}}-xxx.deb + + # Red Hat/CentOS/Fedora + wget https://dl.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-xxx.rpm + sudo yum localinstall influxdb2-{{< latest-patch >}}-xxx.rpm + ``` + _Use the exact filename of the download of `.rpm` package (for example, `influxdb2-{{< latest-patch >}}-amd64.rpm`)._ + +2. Start the InfluxDB service: + + ```sh + sudo service influxdb start + ``` + + Installing the InfluxDB package creates a service file at `/lib/systemd/system/influxdb.service` + to start InfluxDB as a background service on startup. + +3. Restart your system and verify that the service is running correctly: + + ``` + $ sudo service influxdb status + ● influxdb.service - InfluxDB is an open-source, distributed, time series database + Loaded: loaded (/lib/systemd/system/influxdb.service; enabled; vendor preset: enable> + Active: active (running) + ``` + +For information about where InfluxDB stores data on disk when running as a service, +see [File system layout](/influxdb/v2.5/reference/internals/file-system-layout/?t=Linux#installed-as-a-package). + +To customize your InfluxDB configuration, use either +[command line flags (arguments)](#pass-arguments-to-systemd), environment variables, or an InfluxDB configuration file. +See InfluxDB [configuration options](/influxdb/v2.5/reference/config-options/) for more information. + +#### Pass arguments to systemd + +1. Add one or more lines like the following containing arguments for `influxd` to `/etc/default/influxdb2`: + + ```sh + ARG1="--http-bind-address :8087" + ARG2="" + ``` + +2. 
Edit the `/lib/systemd/system/influxdb.service` file as follows: + + ```sh + ExecStart=/usr/bin/influxd $ARG1 $ARG2 + ``` + +### Manually download and install the influxd binary + +1. **Download the InfluxDB binary.** + + Download the InfluxDB binary [from your browser](#download-from-your-browser) + or [from the command line](#download-from-the-command-line). + + #### Download from your browser + + InfluxDB v{{< current-version >}} (amd64) + InfluxDB v{{< current-version >}} (arm) + + #### Download from the command line + + ```sh + # amd64 + wget https://dl.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-linux-amd64.tar.gz + + # arm + wget https://dl.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-linux-arm64.tar.gz + ``` + +4. **Extract the downloaded binary.** + + _**Note:** The following commands are examples. Adjust the filenames, paths, and utilities if necessary._ + + ```sh + # amd64 + tar xvzf path/to/influxdb2-{{< latest-patch >}}-linux-amd64.tar.gz + + # arm + tar xvzf path/to/influxdb2-{{< latest-patch >}}-linux-arm64.tar.gz + ``` + +3. **(Optional) Place the extracted `influxd` executable binary in your system `$PATH`.** + + ```sh + # amd64 + sudo cp influxdb2-{{< latest-patch >}}-linux-amd64/influxd /usr/local/bin/ + + # arm + sudo cp influxdb2-{{< latest-patch >}}-linux-arm64/influxd /usr/local/bin/ + ``` + + If you do not move the `influxd` binary into your `$PATH`, prefix the executable + `./` to run it in place. + +{{< expand-wrapper >}} +{{% expand "Recommended – Set appropriate directory permissions" %}} + +To prevent unwanted access to data, we recommend setting the permissions on the influxdb `data-dir` to not be world readable. For server installs, it is also recommended to set a umask of 0027 to properly permission all newly created files. This can be done via the UMask directive in a systemd unit file, or by running influxdb under a specific user with the umask properly set. + +Example: + +```shell +> chmod 0750 ~/.influxdbv2 +``` + +{{% /expand %}} +{{% expand "Recommended – Verify the authenticity of downloaded binary" %}} + +For added security, use `gpg` to verify the signature of your download. +(Most operating systems include the `gpg` command by default. +If `gpg` is not available, see the [GnuPG homepage](https://gnupg.org/download/) for installation instructions.) + +1. Download and import InfluxData's public key: + + ``` + curl -s https://repos.influxdata.com/influxdb2.key | gpg --import - + ``` + +2. Download the signature file for the release by adding `.asc` to the download URL. + For example: + + ``` + wget https://dl.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-linux-amd64.tar.gz.asc + ``` + +3. Verify the signature with `gpg --verify`: + + ``` + gpg --verify influxdb2-{{< latest-patch >}}-linux-amd64.tar.gz.asc influxdb2-{{< latest-patch >}}-linux-amd64.tar.gz + ``` + + The output from this command should include the following: + + ``` + gpg: Good signature from "InfluxData " [unknown] + ``` +{{% /expand %}} +{{< /expand-wrapper >}} + +## Start InfluxDB + +If InfluxDB was installed as a systemd service, systemd manages the `influxd` daemon and no further action is required. 
+If the binary was manually downloaded and added to the system `$PATH`, start the `influxd` daemon with the following command: + +```bash +influxd +``` + +_See the [`influxd` documentation](/influxdb/v2.5/reference/cli/influxd) for information about +available flags and options._ + +### Networking ports + +By default, InfluxDB uses TCP port `8086` for client-server communication over +the [InfluxDB HTTP API](/influxdb/v2.5/reference/api/). + +{{% note %}} +#### InfluxDB "phone home" + +By default, InfluxDB sends telemetry data back to InfluxData. +The [InfluxData telemetry](https://www.influxdata.com/telemetry) page provides +information about what data is collected and how it is used. + +To opt-out of sending telemetry data back to InfluxData, include the +`--reporting-disabled` flag when starting `influxd`. + +```bash +influxd --reporting-disabled +``` +{{% /note %}} + +{{% /tab-content %}} + + + +{{% tab-content %}} +{{% note %}} +#### System requirements +- Windows 10 +- 64-bit AMD architecture +- [Powershell](https://docs.microsoft.com/powershell/) or + [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/) + +#### Command line examples +Use **Powershell** or **WSL** to execute `influx` and `influxd` commands. +The command line examples in this documentation use `influx` and `influxd` as if +installed on the system `PATH`. +If these binaries are not installed on your `PATH`, replace `influx` and `influxd` +in the provided examples with `./influx` and `./influxd` respectively. +{{% /note %}} + +## Download and install InfluxDB v{{< current-version >}} + +{{% note %}} +#### InfluxDB and the influx CLI are separate packages +The InfluxDB server ([`influxd`](/influxdb/v2.5/reference/cli/influxd/)) and the +[`influx` CLI](/influxdb/v2.5/reference/cli/influx/) are packaged and +versioned separately. +For information about installing the `influx` CLI, see +[Install and use the influx CLI](/influxdb/v2.5/tools/influx-cli/). +{{% /note %}} + +InfluxDB v{{< current-version >}} (Windows) + +Expand the downloaded archive into `C:\Program Files\InfluxData\` and rename the files if desired. + +```powershell +> Expand-Archive .\influxdb2-{{< latest-patch >}}-windows-amd64.zip -DestinationPath 'C:\Program Files\InfluxData\' +> mv 'C:\Program Files\InfluxData\influxdb2-{{< latest-patch >}}-windows-amd64' 'C:\Program Files\InfluxData\influxdb' +``` + +{{< expand-wrapper >}} +{{% expand "Recommended – Set appropriate directory permissions" %}} + +To prevent unwanted access to data, we recommend setting the permissions on the influxdb `data-dir` to not be world readable. + +Example: + +````powershell +> $acl = Get-Acl "C:\Users\\.influxdbv2" +> $accessRule = New-Object System.Security.AccessControl.FileSystemAccessRule("everyone","Read","Deny") +> $acl.SetAccessRule($accessRule) +> $acl | Set-Acl "C:\Users\\.influxdbv2" + +{{% /expand %}} +{{< /expand-wrapper >}} + + +## Networking ports +By default, InfluxDB uses TCP port `8086` for client-server communication over +the [InfluxDB HTTP API](/influxdb/v2.5/reference/api/). 
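+
+If another process on your system is already using port `8086`, you can pass the
+`--http-bind-address` option (the same option used in the Linux systemd example
+on this page) when you start `influxd` in the next section--for example, to use
+port `8087` instead:
+
+```powershell
+> ./influxd --http-bind-address :8087
+```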
+ +## Start InfluxDB +In **Powershell**, navigate into `C:\Program Files\InfluxData\influxdb` and start +InfluxDB by running the `influxd` daemon: + +```powershell +> cd -Path 'C:\Program Files\InfluxData\influxdb' +> ./influxd +``` + +_See the [`influxd` documentation](/influxdb/v2.5/reference/cli/influxd) for information about +available flags and options._ + +{{% note %}} +#### Grant network access +When starting InfluxDB for the first time, **Windows Defender** will appear with +the following message: + +> Windows Defender Firewall has blocked some features of this app. + +1. Select **Private networks, such as my home or work network**. +2. Click **Allow access**. +{{% /note %}} + +{{% note %}} +#### InfluxDB "phone home" + +By default, InfluxDB sends telemetry data back to InfluxData. +The [InfluxData telemetry](https://www.influxdata.com/telemetry) page provides +information about what data is collected and how it is used. + +To opt-out of sending telemetry data back to InfluxData, include the +`--reporting-disabled` flag when starting `influxd`. + +```bash +./influxd --reporting-disabled +``` +{{% /note %}} + +{{% /tab-content %}} + + + +{{% tab-content %}} +## Download and run InfluxDB v{{< current-version >}} + +Use `docker run` to download and run the InfluxDB v{{< current-version >}} Docker image. +Expose port `8086`, which InfluxDB uses for client-server communication over +the [InfluxDB HTTP API](/influxdb/v2.5/reference/api/). + +```sh +docker run --name influxdb -p 8086:8086 influxdb:{{< latest-patch >}} +``` +_To run InfluxDB in [detached mode](https://docs.docker.com/engine/reference/run/#detached-vs-foreground), include the `-d` flag in the `docker run` command._ + +## Persist data outside the InfluxDB container + +1. Create a new directory to store your data in and navigate into the directory. + + ```sh + mkdir path/to/influxdb-docker-data-volume && cd $_ + ``` +2. From within your new directory, run the InfluxDB Docker container with the `--volume` flag to + persist data from `/var/lib/influxdb2` _inside_ the container to the current working directory in + the host file system. + + ```sh + docker run \ + --name influxdb \ + -p 8086:8086 \ + --volume $PWD:/var/lib/influxdb2 \ + influxdb:{{< latest-patch >}} + ``` + +## Configure InfluxDB with Docker + +To mount an InfluxDB configuration file and use it from within Docker: + +1. [Persist data outside the InfluxDB container](#persist-data-outside-the-influxdb-container). + +2. Use the command below to generate the default configuration file on the host file system: + + ```sh + docker run \ + --rm influxdb:{{< latest-patch >}} \ + influxd print-config > config.yml + ``` + +3. Modify the default configuration, which will now be available under `$PWD`. + +4. Start the InfluxDB container: + + ```sh + docker run -p 8086:8086 \ + -v $PWD/config.yml:/etc/influxdb2/config.yml \ + influxdb:{{< latest-patch >}} + ``` + +(Find more about configuring InfluxDB [here](https://docs.influxdata.com/influxdb/v2.5/reference/config-options/).) + +## Open a shell in the InfluxDB container + +To use the `influx` command line interface, open a shell in the `influxdb` Docker container: + +```sh +docker exec -it influxdb /bin/bash +``` + +{{% note %}} +#### InfluxDB "phone home" + +By default, InfluxDB sends telemetry data back to InfluxData. +The [InfluxData telemetry](https://www.influxdata.com/telemetry) page provides +information about what data is collected and how it is used. 
+ +To opt-out of sending telemetry data back to InfluxData, include the +`--reporting-disabled` flag when starting the InfluxDB container. + +```sh +docker run -p 8086:8086 influxdb:{{< latest-patch >}} --reporting-disabled +``` +{{% /note %}} + +{{% /tab-content %}} + + + +{{% tab-content %}} + +## Install InfluxDB in a Kubernetes cluster + +The instructions below use **minikube** or **kind**, but the steps should be similar in any Kubernetes cluster. +InfluxData also makes [Helm charts](https://github.com/influxdata/helm-charts) available. + +1. Install [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) or + [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation). + +2. Start a local cluster: + + ```sh + # with minikube + minikube start + + # with kind + kind create cluster + ``` + +3. Apply the [sample InfluxDB configuration](https://github.com/influxdata/docs-v2/blob/master/static/downloads/influxdb-k8-minikube.yaml) by running: + + ```sh + kubectl apply -f https://raw.githubusercontent.com/influxdata/docs-v2/master/static/downloads/influxdb-k8-minikube.yaml + ``` + + This creates an `influxdb` Namespace, Service, and StatefulSet. + A PersistentVolumeClaim is also created to store data written to InfluxDB. + + **Important**: Always inspect YAML manifests before running `kubectl apply -f `! + +4. Ensure the Pod is running: + + ```sh + kubectl get pods -n influxdb + ``` + +5. Ensure the Service is available: + + ```sh + kubectl describe service -n influxdb influxdb + ``` + + You should see an IP address after `Endpoints` in the command's output. + +6. Forward port 8086 from inside the cluster to localhost: + + ```sh + kubectl port-forward -n influxdb service/influxdb 8086:8086 + ``` + +{{% /tab-content %}} + + +{{% tab-content %}} + +## Install InfluxDB v{{< current-version >}} on Raspberry Pi + +{{% note %}} +#### Requirements + +To run InfluxDB on Raspberry Pi, you need: + +- a Raspberry Pi 4+ or 400 +- a 64-bit operating system. + We recommend installing a [64-bit version of Ubuntu](https://ubuntu.com/download/raspberry-pi) + of Ubuntu Desktop or Ubuntu Server compatible with 64-bit Raspberry Pi. +{{% /note %}} + +### Install Linux binaries + +Follow the [Linux installation instructions](/influxdb/v2.5/install/?t=Linux) +to install InfluxDB on a Raspberry Pi. + +### Monitor your Raspberry Pi +Use the [InfluxDB Raspberry Pi template](/influxdb/cloud/monitor-alert/templates/infrastructure/raspberry-pi/) +to easily configure collecting and visualizing system metrics for the Raspberry Pi. + +#### Monitor 32-bit Raspberry Pi systems +If you have a 32-bit Raspberry Pi, [use Telegraf](/{{< latest "telegraf" >}}/) +to collect and send data to: + +- [InfluxDB OSS](/influxdb/v2.5/), running on a 64-bit system +- InfluxDB Cloud with a [**Free Tier**](/influxdb/cloud/account-management/pricing-plans/#free-plan) account +- InfluxDB Cloud with a paid [**Usage-Based**](/influxdb/cloud/account-management/pricing-plans/#usage-based-plan) account with relaxed resource restrictions. + +{{% /tab-content %}} + + +{{< /tabs-wrapper >}} + +## Download and install the influx CLI +The [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) lets you manage InfluxDB +from your command line. + +Download and install the influx CLI + +## Set up InfluxDB + +The initial setup process for an InfluxDB instance creates the following: +- An organization with the name you provide. +- A primary bucket with the name you provide. 
+- An admin [authorization](/influxdb/v2.5/security/tokens/) with the following properties: + - The username and password that you provide. + - An API token (_[operator token](/influxdb/v2.5/security/tokens/#operator-token)_). + - Read-write permissions for all resources in the InfluxDB instance. + +To run an interactive setup that prompts you for the required information, +use the InfluxDB user interface (UI) or the `influx` command line interface (CLI). + +To automate the setup--for example, with a script that you write-- +use the `influx` command line interface (CLI) or the InfluxDB `/api/v2` API. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Set up with the UI](#) +[Set up with the CLI](#) +{{% /tabs %}} + + +{{% tab-content %}} +### Set up InfluxDB through the UI + +1. With InfluxDB running, visit [http://localhost:8086](http://localhost:8086). +2. Click **Get Started** + +#### Set up your initial user + +1. Enter a **Username** for your initial user. +2. Enter a **Password** and **Confirm Password** for your user. +3. Enter your initial **Organization Name**. +4. Enter your initial **Bucket Name**. +5. Click **Continue**. + +Your InfluxDB instance is now initialized. + +### (Optional) Set up and use the influx CLI + +To avoid having to pass your InfluxDB +API token with each `influx` command, set up a configuration profile to store your credentials--for example, +enter the following code in your terminal: + + ```sh + # Set up a configuration profile + influx config create -n default \ + -u http://localhost:8086 \ + -o INFLUX_ORG \ + -t INFLUX_API_TOKEN \ + -a + ``` + +Replace the following: + +- **`INFLUX_ORG`**: [your organization name](/influxdb/v2.5/organizations/view-orgs/). +- **`INFLUX_API_TOKEN`**: [your API token](/influxdb/v2.5/security/tokens/view-tokens/). + +This configures a new profile named `default` and makes the profile active +so your `influx` CLI commands run against the specified InfluxDB instance. +For more detail about configuration profiles, see [`influx config`](/influxdb/v2.5/reference/cli/influx/config/). + +Once you have the `default` configuration profile, you're ready to [create All-Access tokens](#create-all-access-tokens) +or get started [collecting and writing data](/influxdb/v2.5/write-data). + +{{% /tab-content %}} + + + +{{% tab-content %}} +### Set up InfluxDB through the influx CLI + +Use the `influx setup` CLI command in interactive or non-interactive (_headless_) mode to initialize +your InfluxDB instance. + +Do one of the following: + +- [Run `influx setup` without user interaction](#run-influx-setup-without-user-interaction) +- [Run `influx setup` with user prompts](#run-influx-setup-with-user-prompts) + +#### Run `influx setup` without user interaction + +To run the InfluxDB setup process with your automation scripts, pass [flags](/influxdb/v2.5/reference/cli/influx/setup/#flags) +with the required information to the `influx setup` command. +Pass the `-f, --force` flag to bypass screen prompts. 
+ +The following example command shows how to set up InfluxDB in non-interactive +mode with an initial admin user, +_[operator token](/influxdb/v2.5/security/tokens/#operator-token)_, +and bucket: + +```sh +influx setup -u USERNAME -p PASSWORD -t TOKEN -o ORGANIZATION_NAME -b BUCKET_NAME -f +``` + +The output is the following: + +```sh +User Organization Bucket +USERNAME ORGANIZATION_NAME BUCKET_NAME +``` + +If you run `influx setup` without the `-t, --token` flag, then InfluxDB +automatically generates an API token for the initial authorization--for example, +the following setup command creates the initial authorization with an +auto-generated API token: + +```sh +influx setup -u USERNAME -p PASSWORD -o ORGANIZATION_NAME -b BUCKET_NAME -f +``` + +Once setup completes, InfluxDB is initialized with the [authorization](/influxdb/v2.5/security/tokens/), [user](/influxdb/v2.5/reference/glossary/#user), [organization](/influxdb/v2.5/reference/glossary/#organization), and [bucket](/influxdb/v2.5/reference/glossary/#bucket). + +InfluxDB creates a `default` configuration profile for you that provides your +InfluxDB URL, organization, and API token to `influx` CLI commands. +For more detail about configuration profiles, see [`influx config`](/influxdb/v2.5/reference/cli/influx/config/). + +Once you have the `default` configuration profile, you're ready to [create All-Access tokens](#create-all-access-tokens) +or get started [collecting and writing data](/influxdb/v2.5/write-data). + +#### Run `influx setup` with user prompts + +To run setup with prompts for the required information, enter the following +command in your terminal: + +```sh +influx setup +``` + +Complete the following steps as prompted by the CLI: + +1. Enter a **primary username**. +2. Enter a **password** for your user. +3. **Confirm your password** by entering it again. +4. Enter a name for your **primary organization**. +5. Enter a name for your **primary bucket**. +6. Enter a **retention period** for your primary bucket—valid units are + nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), + seconds (`s`), minutes (`m`), hours (`h`), days (`d`), and weeks (`w`). + Enter nothing for an infinite retention period. +7. Confirm the details for your primary user, organization, and bucket. + +Once setup completes, InfluxDB is initialized with the user, organization, bucket, +and _[operator token](/influxdb/v2.5/security/tokens/#operator-token)_. + +InfluxDB creates a `default` configuration profile for you that provides your +InfluxDB URL, organization, and API token to `influx` CLI commands. +For more detail about configuration profiles, see [`influx config`](/influxdb/v2.5/reference/cli/influx/config/). + +Once you have the `default` configuration profile, you're ready to [create All-Access tokens](#create-all-access-tokens) +or get started [collecting and writing data](/influxdb/v2.5/write-data). + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +### Create All-Access tokens + +Because [Operator tokens](/influxdb/v2.5/security/tokens/#operator-token) +have full read and write access to all organizations in the database, +we recommend +[creating an All-Access token](/influxdb/v2.5/security/tokens/create-token/) +for each organization and using those tokens to manage InfluxDB. 
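+
+The following is a minimal sketch of creating one with the `influx` CLI--it
+assumes the `--all-access` flag available in influx CLI 2.5, the `default`
+configuration profile created during setup, and an organization named
+`example-org`:
+
+```sh
+influx auth create \
+  --org example-org \
+  --all-access \
+  --description "All-access token for example-org"
+```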
diff --git a/content/influxdb/v2.5/migrate-data/_index.md b/content/influxdb/v2.5/migrate-data/_index.md new file mode 100644 index 000000000..e357c547b --- /dev/null +++ b/content/influxdb/v2.5/migrate-data/_index.md @@ -0,0 +1,15 @@ +--- +title: Migrate data to InfluxDB +description: > + Migrate data to InfluxDB from other InfluxDB instances including by InfluxDB OSS + and InfluxDB Cloud. +menu: + influxdb_2_5: + name: Migrate data +weight: 9 +--- + +Migrate data to InfluxDB from other InfluxDB instances including by InfluxDB OSS +and InfluxDB Cloud. + +{{< children >}} diff --git a/content/influxdb/v2.5/migrate-data/migrate-cloud-to-oss.md b/content/influxdb/v2.5/migrate-data/migrate-cloud-to-oss.md new file mode 100644 index 000000000..1a031a8dc --- /dev/null +++ b/content/influxdb/v2.5/migrate-data/migrate-cloud-to-oss.md @@ -0,0 +1,372 @@ +--- +title: Migrate data from InfluxDB Cloud to InfluxDB OSS +description: > + To migrate data from InfluxDB Cloud to InfluxDB OSS, query the data from + InfluxDB Cloud in time-based batches and write the data to InfluxDB OSS. +menu: + influxdb_2_5: + name: Migrate from Cloud to OSS + parent: Migrate data +weight: 102 +--- + +To migrate data from InfluxDB Cloud to InfluxDB OSS, query the data +from InfluxDB Cloud and write the data to InfluxDB OSS. +Because full data migrations will likely exceed your organization's limits and +adjustable quotas, migrate your data in batches. + +The following guide provides instructions for setting up an InfluxDB OSS task +that queries data from an InfluxDB Cloud bucket in time-based batches and writes +each batch to an InfluxDB OSS bucket. + +{{% cloud %}} +All queries against data in InfluxDB Cloud are subject to your organization's +[rate limits and adjustable quotas](/influxdb/cloud/account-management/limits/). +{{% /cloud %}} + +- [Set up the migration](#set-up-the-migration) +- [Migration task](#migration-task) + - [Configure the migration](#configure-the-migration) + - [Migration Flux script](#migration-flux-script) + - [Configuration help](#configuration-help) +- [Monitor the migration progress](#monitor-the-migration-progress) +- [Troubleshoot migration task failures](#troubleshoot-migration-task-failures) + +## Set up the migration +1. [Install and set up InfluxDB OSS](/influxdb/{{< current-version-link >}}/install/). + +2. **In InfluxDB Cloud**, [create an API token](/influxdb/cloud/security/tokens/create-token/) + with **read access** to the bucket you want to migrate. + +3. **In InfluxDB OSS**: + 1. Add your **InfluxDB Cloud API token** as a secret using the key, + `INFLUXDB_CLOUD_TOKEN`. + _See [Add secrets](/influxdb/{{< current-version-link >}}/security/secrets/add/) for more information._ + 2. [Create a bucket](/influxdb/{{< current-version-link >}}/organizations/buckets/create-bucket/) + **to migrate data to**. + 3. [Create a bucket](/influxdb/{{< current-version-link >}}/organizations/buckets/create-bucket/) + **to store temporary migration metadata**. + 4. [Create a new task](/influxdb/{{< current-version-link >}}/process-data/manage-tasks/create-task/) + using the provided [migration task](#migration-task). + Update the necessary [migration configuration options](#configure-the-migration). + 5. _(Optional)_ Set up [migration monitoring](#monitor-the-migration-progress). + 6. Save the task. + + {{% note %}} +Newly-created tasks are enabled by default, so the data migration begins when you save the task. 
+ {{% /note %}} + +**After the migration is complete**, each subsequent migration task execution +will fail with the following error: + +``` +error exhausting result iterator: error calling function "die" @41:9-41:86: +Batch range is beyond the migration range. Migration is complete. +``` + +## Migration task + +### Configure the migration +1. Specify how often you want the task to run using the `task.every` option. + _See [Determine your task interval](#determine-your-task-interval)._ + +2. Define the following properties in the `migration` + [record](/{{< latest "flux" >}}/data-types/composite/record/): + + ##### migration + - **start**: Earliest time to include in the migration. + _See [Determine your migration start time](#determine-your-migration-start-time)._ + - **stop**: Latest time to include in the migration. + - **batchInterval**: Duration of each time-based batch. + _See [Determine your batch interval](#determine-your-batch-interval)._ + - **batchBucket**: InfluxDB OSS bucket to store migration batch metadata in. + - **sourceHost**: [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions) + to migrate data from. + - **sourceOrg**: InfluxDB Cloud organization to migrate data from. + - **sourceToken**: InfluxDB Cloud API token. To keep the API token secure, store + it as a secret in InfluxDB OSS. + - **sourceBucket**: InfluxDB Cloud bucket to migrate data from. + - **destinationBucket**: InfluxDB OSS bucket to migrate data to. + +### Migration Flux script + +```js +import "array" +import "experimental" +import "influxdata/influxdb/secrets" + +// Configure the task +option task = {every: 5m, name: "Migrate data from InfluxDB Cloud"} + +// Configure the migration +migration = { + start: 2022-01-01T00:00:00Z, + stop: 2022-02-01T00:00:00Z, + batchInterval: 1h, + batchBucket: "migration", + sourceHost: "https://cloud2.influxdata.com", + sourceOrg: "example-cloud-org", + sourceToken: secrets.get(key: "INFLUXDB_CLOUD_TOKEN"), + sourceBucket: "example-cloud-bucket", + destinationBucket: "example-oss-bucket", +} + +// batchRange dynamically returns a record with start and stop properties for +// the current batch. It queries migration metadata stored in the +// `migration.batchBucket` to determine the stop time of the previous batch. +// It uses the previous stop time as the new start time for the current batch +// and adds the `migration.batchInterval` to determine the current batch stop time. +batchRange = () => { + _lastBatchStop = + (from(bucket: migration.batchBucket) + |> range(start: migration.start) + |> filter(fn: (r) => r._field == "batch_stop") + |> filter(fn: (r) => r.srcOrg == migration.sourceOrg) + |> filter(fn: (r) => r.srcBucket == migration.sourceBucket) + |> last() + |> findRecord(fn: (key) => true, idx: 0))._value + _batchStart = + if exists _lastBatchStop then + time(v: _lastBatchStop) + else + migration.start + + return {start: _batchStart, stop: experimental.addDuration(d: migration.batchInterval, to: _batchStart)} +} + +// Define a static record with batch start and stop time properties +batch = {start: batchRange().start, stop: batchRange().stop} + +// Check to see if the current batch start time is beyond the migration.stop +// time and exit with an error if it is. +finished = + if batch.start >= migration.stop then + die(msg: "Batch range is beyond the migration range. Migration is complete.") + else + "Migration in progress" + +// Query all data from the specified source bucket within the batch-defined time +// range. 
To limit migrated data by measurement, tag, or field, add a `filter()` +// function after `range()` with the appropriate predicate fn. +data = () => + from(host: migration.sourceHost, org: migration.sourceOrg, token: migration.sourceToken, bucket: migration.sourceBucket) + |> range(start: batch.start, stop: batch.stop) + +// rowCount is a stream of tables that contains the number of rows returned in +// the batch and is used to generate batch metadata. +rowCount = + data() + |> group(columns: ["_start", "_stop"]) + |> count() + +// emptyRange is a stream of tables that acts as filler data if the batch is +// empty. This is used to generate batch metadata for empty batches and is +// necessary to correctly increment the time range for the next batch. +emptyRange = array.from(rows: [{_start: batch.start, _stop: batch.stop, _value: 0}]) + +// metadata returns a stream of tables representing batch metadata. +metadata = () => { + _input = + if exists (rowCount |> findRecord(fn: (key) => true, idx: 0))._value then + rowCount + else + emptyRange + + return + _input + |> map( + fn: (r) => + ({ + _time: now(), + _measurement: "batches", + srcOrg: migration.sourceOrg, + srcBucket: migration.sourceBucket, + dstBucket: migration.destinationBucket, + batch_start: string(v: batch.start), + batch_stop: string(v: batch.stop), + rows: r._value, + percent_complete: + float(v: int(v: r._stop) - int(v: migration.start)) / float( + v: int(v: migration.stop) - int(v: migration.start), + ) * 100.0, + }), + ) + |> group(columns: ["_measurement", "srcOrg", "srcBucket", "dstBucket"]) +} + +// Write the queried data to the specified InfluxDB OSS bucket. +data() + |> to(bucket: migration.destinationBucket) + +// Generate and store batch metadata in the migration.batchBucket. +metadata() + |> experimental.to(bucket: migration.batchBucket) +``` + +### Configuration help + +{{< expand-wrapper >}} + + +{{% expand "Determine your task interval" %}} + +The task interval determines how often the migration task runs and is defined by +the [`task.every` option](/influxdb/v2.5/process-data/task-options/#every). +InfluxDB Cloud rate limits and quotas reset every five minutes, so +**we recommend a `5m` task interval**. + +You can do shorter task intervals and execute the migration task more often, +but you need to balance the task interval with your [batch interval](#determine-your-batch-interval) +and the amount of data returned in each batch. +If the total amount of data queried in each five-minute interval exceeds your +InfluxDB Cloud organization's [rate limits and quotas](/influxdb/cloud/account-management/limits/), +the batch will fail until rate limits and quotas reset. + +{{% /expand %}} + + + +{{% expand "Determine your migration start time" %}} + +The `migration.start` time should be at or near the same time as the earliest +data point you want to migrate. +All migration batches are determined using the `migration.start` time and +`migration.batchInterval` settings. + +To find time of the earliest point in your bucket, run the following query: + +```js +from(bucket: "example-cloud-bucket") + |> range(start: 0) + |> group() + |> first() + |> keep(columns: ["_time"]) +``` + +{{% /expand %}} + + + +{{% expand "Determine your batch interval" %}} + +The `migration.batchInterval` setting controls the time range queried by each batch. 
+The "density" of the data in your InfluxDB Cloud bucket and your InfluxDB Cloud +organization's [rate limits and quotas](/influxdb/cloud/account-management/limits/) +determine what your batch interval should be. + +For example, if you're migrating data collected from hundreds of sensors with +points recorded every second, your batch interval will need to be shorter. +If you're migrating data collected from five sensors with points recorded every +minute, your batch interval can be longer. +It all depends on how much data gets returned in a single batch. + +If points occur at regular intervals, you can get a fairly accurate estimate of +how much data will be returned in a given time range by using the `/api/v2/query` +endpoint to execute a query for the time range duration and then measuring the +size of the response body. + +The following `curl` command queries an InfluxDB Cloud bucket for the last day +and returns the size of the response body in bytes. +You can customize the range duration to match your specific use case and +data density. + +```sh +INFLUXDB_CLOUD_ORG= +INFLUXDB_CLOUD_TOKEN= +INFLUXDB_CLOUD_BUCKET= + +curl -so /dev/null --request POST \ + https://cloud2.influxdata.com/api/v2/query?org=$INFLUXDB_CLOUD_ORG \ + --header "Authorization: Token $INFLUXDB_CLOUD_TOKEN" \ + --header "Accept: application/csv" \ + --header "Content-type: application/vnd.flux" \ + --data "from(bucket:\"$INFLUXDB_CLOUD_BUCKET\") |> range(start: -1d, stop: now())" \ + --write-out '%{size_download}' +``` + +{{% note %}} +You can also use other HTTP API tools like [Postman](https://www.postman.com/) +that provide the size of the response body. +{{% /note %}} + +Divide the output of this command by 1000000 to convert it to megabytes (MB). + +``` +batchInterval = (read-rate-limit-mb / response-body-size-mb) * range-duration +``` + +For example, if the response body of your query that returns data from one day +is 8 MB and you're using the InfluxDB Cloud Free Plan with a read limit of +300 MB per five minutes: + +```js +batchInterval = (300 / 8) * 1d +// batchInterval = 37d +``` + +You could query 37 days of data before hitting your read limit, but this is just an estimate. +We recommend setting the `batchInterval` slightly lower than the calculated interval +to allow for variation between batches. +So in this example, **it would be best to set your `batchInterval` to `35d`**. + +##### Important things to note +- This assumes no other queries are running in your InfluxDB Cloud organization. +- You should also consider your network speeds and whether a batch can be fully + downloaded within the [task interval](#determine-your-task-interval). + +{{% /expand %}} + +{{< /expand-wrapper >}} + +## Monitor the migration progress +The [InfluxDB Cloud Migration Community template](https://github.com/influxdata/community-templates/tree/master/influxdb-cloud-oss-migration/) +installs the migration task outlined in this guide as well as a dashboard +for monitoring running data migrations. + +{{< img-hd src="/img/influxdb/2-1-migration-dashboard.png" alt="InfluxDB Cloud migration dashboard" />}} + +Install the InfluxDB Cloud Migration template + +## Troubleshoot migration task failures +If the migration task fails, [view your task logs](/influxdb/v2.5/process-data/manage-tasks/task-run-history/) +to identify the specific error. Below are common causes of migration task failures. 
+
+- [Exceeded rate limits](#exceeded-rate-limits)
+- [Invalid API token](#invalid-api-token)
+- [Query timeout](#query-timeout)
+
+### Exceeded rate limits
+If your data migration causes you to exceed your InfluxDB Cloud organization's
+limits and quotas, the task will return an error similar to:
+
+```
+too many requests
+```
+
+**Possible solutions**:
+- Update the `migration.batchInterval` setting in your migration task to use
+  a smaller interval. Each batch will then query less data.
+
+### Invalid API token
+If the API token you add as the `INFLUXDB_CLOUD_TOKEN` secret doesn't have read access to
+your InfluxDB Cloud bucket, the task will return an error similar to:
+
+```
+unauthorized access
+```
+
+**Possible solutions**:
+- Ensure the API token has read access to your InfluxDB Cloud bucket.
+- Generate a new InfluxDB Cloud API token with read access to the bucket you
+  want to migrate. Then, update the `INFLUXDB_CLOUD_TOKEN` secret in your
+  InfluxDB OSS instance with the new token.
+
+### Query timeout
+The InfluxDB Cloud query timeout is 90 seconds. If it takes longer than this to
+return the data from the batch interval, the query will time out and the
+task will fail.
+
+**Possible solutions**:
+- Update the `migration.batchInterval` setting in your migration task to use
+  a smaller interval. Each batch will then query less data and take less time
+  to return results.
diff --git a/content/influxdb/v2.5/migrate-data/migrate-oss.md b/content/influxdb/v2.5/migrate-data/migrate-oss.md
new file mode 100644
index 000000000..6a25eada1
--- /dev/null
+++ b/content/influxdb/v2.5/migrate-data/migrate-oss.md
@@ -0,0 +1,64 @@
+---
+title: Migrate data from InfluxDB OSS to other InfluxDB instances
+description: >
+  To migrate data from an InfluxDB OSS bucket to another InfluxDB OSS or InfluxDB
+  Cloud bucket, export your data as line protocol and write it to your other
+  InfluxDB bucket.
+menu:
+  influxdb_2_5:
+    name: Migrate data from OSS
+    parent: Migrate data
+weight: 101
+---
+
+To migrate data from an InfluxDB OSS bucket to another InfluxDB OSS or InfluxDB
+Cloud bucket, export your data as line protocol and write it to your other
+InfluxDB bucket.
+
+{{% cloud %}}
+#### InfluxDB Cloud write limits
+If migrating data from InfluxDB OSS to InfluxDB Cloud, you are subject to your
+[InfluxDB Cloud organization's rate limits and adjustable quotas](/influxdb/cloud/account-management/limits/).
+Consider exporting your data in time-based batches to limit the file size
+of exported line protocol to match your InfluxDB Cloud organization's limits.
+{{% /cloud %}}
+
+1. [Find the InfluxDB OSS bucket ID](/influxdb/{{< current-version-link >}}/organizations/buckets/view-buckets/)
+    that contains data you want to migrate.
+2. Use the `influxd inspect export-lp` command to export data in your bucket as
+    [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/).
+    Provide the following:
+
+    - **bucket ID**: ({{< req >}}) ID of the bucket to migrate.
+    - **engine path**: ({{< req >}}) Path to the TSM storage files on disk.
+      The default engine path [depends on your operating system](/influxdb/{{< current-version-link >}}/reference/internals/file-system-layout/#file-system-layout).
+      If you use a [custom engine-path](/influxdb/{{< current-version-link >}}/reference/config-options/#engine-path),
+      provide your custom path.
+    - **output path**: ({{< req >}}) File path to output line protocol to.
+    - **start time**: Earliest time to export.
+    - **end time**: Latest time to export. 
+    - **measurement**: Export a specific measurement. By default, the command
+      exports all measurements.
+    - **compression**: ({{< req text="Recommended" color="magenta" >}})
+      Use Gzip compression to compress the output line protocol file.
+
+    ```sh
+    influxd inspect export-lp \
+      --bucket-id 12ab34cd56ef \
+      --engine-path ~/.influxdbv2/engine \
+      --output-path path/to/export.lp \
+      --start 2022-01-01T00:00:00Z \
+      --end 2022-01-31T23:59:59Z \
+      --compress
+    ```
+
+3. Write the exported line protocol to your InfluxDB OSS or InfluxDB Cloud instance.
+
+    Do any of the following:
+
+    - Write line protocol in the **InfluxDB UI**:
+      - [InfluxDB Cloud UI](/influxdb/cloud/write-data/no-code/load-data/#load-csv-or-line-protocol-in-ui)
+      - [InfluxDB OSS {{< current-version >}} UI](/influxdb/{{< current-version-link >}}/write-data/no-code/load-data/#load-csv-or-line-protocol-in-ui)
+    - [Write line protocol using the `influx write` command](/influxdb/{{< current-version-link >}}/reference/cli/influx/write/)
+    - [Write line protocol using the InfluxDB API](/influxdb/{{< current-version-link >}}/write-data/developer-tools/api/)
+    - [Bulk ingest data (InfluxDB Cloud)](/influxdb/cloud/write-data/bulk-ingest-cloud/)
diff --git a/content/influxdb/v2.5/monitor-alert/_index.md b/content/influxdb/v2.5/monitor-alert/_index.md
new file mode 100644
index 000000000..2d0038320
--- /dev/null
+++ b/content/influxdb/v2.5/monitor-alert/_index.md
@@ -0,0 +1,38 @@
+---
+title: Monitor data and send alerts
+seotitle: Monitor data and send alerts
+description: >
+  Monitor your time series data and send alerts by creating checks, notification
+  rules, and notification endpoints. Or use community templates to monitor supported environments.
+menu:
+  influxdb_2_5:
+    name: Monitor & alert
+weight: 7
+influxdb/v2.5/tags: [monitor, alert, checks, notification, endpoints]
+---
+
+Monitor your time series data and send alerts by creating checks, notification
+rules, and notification endpoints. Or use [community templates to monitor](/influxdb/v2.5/monitor-alert/templates/) supported environments.
+
+## Overview
+
+1. A [check](/influxdb/v2.5/reference/glossary/#check) in InfluxDB queries data and assigns a status with a `_level` based on specific conditions.
+2. InfluxDB stores the output of a check in the `statuses` measurement in the `_monitoring` system bucket.
+3. [Notification rules](/influxdb/v2.5/reference/glossary/#notification-rule) check data in the `statuses`
+   measurement and, based on conditions set in the notification rule, send a message
+   to a [notification endpoint](/influxdb/v2.5/reference/glossary/#notification-endpoint).
+4. InfluxDB stores notifications in the `notifications` measurement in the `_monitoring` system bucket.
+
+## Create an alert
+
+To get started, do the following:
+
+1. [Create checks](/influxdb/v2.5/monitor-alert/checks/create/) to monitor data and assign a status.
+2. [Add notification endpoints](/influxdb/v2.5/monitor-alert/notification-endpoints/create/)
+   to send notifications to third parties.
+3. [Create notification rules](/influxdb/v2.5/monitor-alert/notification-rules/create) to check
+   statuses and send notifications to your notification endpoints.
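+
+As noted in the overview above, checks write their results to the `statuses`
+measurement in the `_monitoring` system bucket, so you can inspect recent check
+results directly with Flux. The following is a minimal sketch--the bucket and
+measurement names come from the overview above, while the one-hour range and
+`crit` level filter are example values:
+
+```js
+from(bucket: "_monitoring")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "statuses")
+    |> filter(fn: (r) => r._level == "crit")
+```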
+ +## Manage your monitoring and alerting pipeline + +{{< children >}} diff --git a/content/influxdb/v2.5/monitor-alert/checks/_index.md b/content/influxdb/v2.5/monitor-alert/checks/_index.md new file mode 100644 index 000000000..fd6d3bd8b --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/checks/_index.md @@ -0,0 +1,19 @@ +--- +title: Manage checks +seotitle: Manage monitoring checks in InfluxDB +description: > + Checks in InfluxDB query data and apply a status or level to each data point based on specified conditions. +menu: + influxdb_2_5: + parent: Monitor & alert +weight: 101 +influxdb/v2.5/tags: [monitor, checks, notifications, alert] +related: + - /influxdb/v2.5/monitor-alert/notification-rules/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +Checks in InfluxDB query data and apply a status or level to each data point based on specified conditions. +Learn how to create and manage checks: + +{{< children >}} diff --git a/content/influxdb/v2.5/monitor-alert/checks/create.md b/content/influxdb/v2.5/monitor-alert/checks/create.md new file mode 100644 index 000000000..71f9302cc --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/checks/create.md @@ -0,0 +1,150 @@ +--- +title: Create checks +seotitle: Create monitoring checks in InfluxDB +description: > + Create a check in the InfluxDB UI. +menu: + influxdb_2_5: + parent: Manage checks +weight: 201 +related: + - /influxdb/v2.5/monitor-alert/notification-rules/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +Create a check in the InfluxDB user interface (UI). +Checks query data and apply a status to each point based on specified conditions. + +## Parts of a check +A check consists of two parts – a query and check configuration. + +#### Check query +- Specifies the dataset to monitor. +- May include tags to narrow results. + +#### Check configuration +- Defines check properties, including the check interval and status message. +- Evaluates specified conditions and applies a status (if applicable) to each data point: + - `crit` + - `warn` + - `info` + - `ok` +- Stores status in the `_level` column. + +## Check types +There are two types of checks: + +- [threshold](#threshold-check) +- [deadman](#deadman-check) + +#### Threshold check +A threshold check assigns a status based on a value being above, below, +inside, or outside of defined thresholds. + +#### Deadman check +A deadman check assigns a status to data when a series or group doesn't report +in a specified amount of time. + +## Create a check +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Click **{{< caps >}}{{< icon "plus" >}} Create{{< /caps >}}** and select the [type of check](#check-types) to create. +3. Click **Name this check** in the top left corner and provide a unique name for the check, and then do the following: + - [Configure the check query](#configure-the-check-query) + - [Configure the check](#configure-the-check) +4. _(Optional)_ In the **Name this check** field at the top, enter a unique name for the check. + +#### Configure the check query +1. Select the **bucket**, **measurement**, **field** and **tag sets** to query. +2. If creating a threshold check, select an **aggregate function**. + Aggregate functions aggregate data between the specified check intervals and + return a single value for the check to process. 
+ + In the **Aggregate functions** column, select an interval from the interval drop-down list + (for example, "Every 5 minutes") and an aggregate function from the list of functions. +3. Click **{{< caps >}}Submit{{< /caps >}}** to run the query and preview the results. + To see the raw query results, click the **View Raw Data {{< icon "toggle" >}}** toggle. + +#### Configure the check +1. Click **{{< caps >}}2. Configure Check{{< /caps >}}** near the top of the window. +2. In the **{{< caps >}}Properties{{< /caps >}}** column, configure the following: + + ##### Schedule Every + Select the interval to run the check (for example, "Every 5 minutes"). + This interval matches the aggregate function interval for the check query. + _Changing the interval here will update the aggregate function interval._ + + ##### Offset + Delay the execution of a task to account for any late data. + Offset queries do not change the queried time range. + + {{% note %}}Your offset must be shorter than your [check interval](#schedule-every). + {{% /note %}} + + ##### Tags + Add custom tags to the query output. + Each custom tag appends a new column to each row in the query output. + The column label is the tag key and the column value is the tag value. + + Use custom tags to associate additional metadata with the check. + Common metadata tags across different checks lets you easily group and organize checks. + You can also use custom tags in [notification rules](/influxdb/v2.5/monitor-alert/notification-rules/create/). + +3. In the **{{< caps >}}Status Message Template{{< /caps >}}** column, enter + the status message template for the check. + Use [Flux string interpolation](/{{< latest "flux" >}}/data-types/basic/string/#interpolate-strings) + to populate the message with data from the query. + + Check data is represented as a record, `r`. + Access specific column values using dot notation: `r.columnName`. + + Use data from the following columns: + + - columns included in the query output + - [custom tags](#tags) added to the query output + - `_check_id` + - `_check_name` + - `_level` + - `_source_measurement` + - `_type` + + ###### Example status message template + ``` + From ${r._check_name}: + ${r._field} is ${r._level}. + Its value is ${string(v: r.field_name)}. + ``` + + When a check generates a status, it stores the message in the `_message` column. + +4. Define check conditions that assign statuses to points. + Condition options depend on your check type. + + ##### Configure a threshold check + 1. In the **{{< caps >}}Thresholds{{< /caps >}}** column, click the status name (CRIT, WARN, INFO, or OK) + to define conditions for that specific status. + 2. From the **When value** drop-down list, select a threshold: is above, is below, + is inside of, is outside of. + 3. Enter a value or values for the threshold. + You can also use the threshold sliders in the data visualization to define threshold values. + + ##### Configure a deadman check + 1. In the **{{< caps >}}Deadman{{< /caps >}}** column, enter a duration for the deadman check in the **for** field. + For example, `90s`, `5m`, `2h30m`, etc. + 2. Use the **set status to** drop-down list to select a status to set on a dead series. + 3. In the **And stop checking after** field, enter the time to stop monitoring the series. + For example, `30m`, `2h`, `3h15m`, etc. + +5. Click the green **{{< icon "check" >}}** in the top right corner to save the check. + +## Clone a check +Create a new check by cloning an existing check. + +1. 
Go to **Alerts > Alerts** in the navigation on the left. + + {{< nav-icon "alerts" >}} + +2. Click the **{{< icon "gear" >}}** icon next to the check you want to clone + and then click **Clone**. diff --git a/content/influxdb/v2.5/monitor-alert/checks/delete.md b/content/influxdb/v2.5/monitor-alert/checks/delete.md new file mode 100644 index 000000000..e59747196 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/checks/delete.md @@ -0,0 +1,33 @@ +--- +title: Delete checks +seotitle: Delete monitoring checks in InfluxDB +description: > + Delete checks in the InfluxDB UI. +menu: + influxdb_2_5: + parent: Manage checks +weight: 204 +related: + - /influxdb/v2.5/monitor-alert/notification-rules/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +If you no longer need a check, use the InfluxDB user interface (UI) to delete it. + +{{% warn %}} +Deleting a check cannot be undone. +{{% /warn %}} + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Click the **{{< icon "delete" >}}** icon, and then click **{{< caps >}}Confirm{{< /caps >}}**. + +After a check is deleted, all statuses generated by the check remain in the `_monitoring` +bucket until the retention period for the bucket expires. + +{{% note %}} +You can also [disable a check](/influxdb/v2.5/monitor-alert/checks/update/#enable-or-disable-a-check) +without having to delete it. +{{% /note %}} diff --git a/content/influxdb/v2.5/monitor-alert/checks/update.md b/content/influxdb/v2.5/monitor-alert/checks/update.md new file mode 100644 index 000000000..66389b40b --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/checks/update.md @@ -0,0 +1,60 @@ +--- +title: Update checks +seotitle: Update monitoring checks in InfluxDB +description: > + Update, rename, enable or disable checks in the InfluxDB UI. +menu: + influxdb_2_5: + parent: Manage checks +weight: 203 +related: + - /influxdb/v2.5/monitor-alert/notification-rules/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +Update checks in the InfluxDB user interface (UI). +Common updates include: + +- [Update check queries and logic](#update-check-queries-and-logic) +- [Enable or disable a check](#enable-or-disable-a-check) +- [Rename a check](#rename-a-check) +- [Add or update a check description](#add-or-update-a-check-description) +- [Add a label to a check](#add-a-label-to-a-check) + +To update checks, select **Alerts > Alerts** in the navigation menu on the left. + +{{< nav-icon "alerts" >}} + + +## Update check queries and logic +1. Click the name of the check you want to update. The check builder appears. +2. To edit the check query, click **{{< caps >}}1. Define Query{{< /caps >}}** at the top of the check builder window. +3. To edit the check logic, click **{{< caps >}}2. Configure Check{{< /caps >}}** at the top of the check builder window. + +_For details about using the check builder, see [Create checks](/influxdb/v2.5/monitor-alert/checks/create/)._ + +## Enable or disable a check +Click the {{< icon "toggle" >}} toggle next to a check to enable or disable it. + +## Rename a check +1. Hover over the name of the check you want to update. +2. Click the **{{< icon "edit" >}}** icon that appears next to the check name. +2. Enter a new name and click out of the name field or press enter to save. + +_You can also rename a check in the [check builder](#update-check-queries-and-logic)._ + +## Add or update a check description +1. Hover over the check description you want to update. +2. 
Click the **{{< icon "edit" >}}** icon that appears next to the description. +2. Enter a new description and click out of the name field or press enter to save. + +## Add a label to a check +1. Click **{{< icon "add-label" >}} Add a label** next to the check you want to add a label to. + The **Add Labels** box appears. +2. To add an existing label, select the label from the list. +3. To create and add a new label: + - In the search field, enter the name of the new label. The **Create Label** box opens. + - In the **Description** field, enter an optional description for the label. + - Select a color for the label. + - Click **{{< caps >}}Create Label{{< /caps >}}**. +4. To remove a label, click **{{< icon "x" >}}** on the label. diff --git a/content/influxdb/v2.5/monitor-alert/checks/view.md b/content/influxdb/v2.5/monitor-alert/checks/view.md new file mode 100644 index 000000000..eeef58ec6 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/checks/view.md @@ -0,0 +1,37 @@ +--- +title: View checks +seotitle: View monitoring checks in InfluxDB +description: > + View check details and statuses and notifications generated by checks in the InfluxDB UI. +menu: + influxdb_2_5: + parent: Manage checks +weight: 202 +related: + - /influxdb/v2.5/monitor-alert/notification-rules/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +View check details and statuses and notifications generated by checks in the InfluxDB user interface (UI). + +- [View a list of all checks](#view-a-list-of-all-checks) +- [View check details](#view-check-details) +- [View statuses generated by a check](#view-statuses-generated-by-a-check) +- [View notifications triggered by a check](#view-notifications-triggered-by-a-check) + +To view checks, click **Alerts > Alerts** in navigation menu on the left. + +{{< nav-icon "alerts" >}} + +## View a list of all checks +The **{{< caps >}}Checks{{< /caps >}}** section of the Alerts landing page displays all existing checks. + +## View check details +Click the name of the check you want to view. +The check builder appears. +Here you can view the check query and logic. + +## View statuses generated by a check +1. Click the **{{< icon "view" >}}** icon on the check. +2. Click **View History**. + The Statuses History page displays statuses generated by the selected check. diff --git a/content/influxdb/v2.5/monitor-alert/custom-checks.md b/content/influxdb/v2.5/monitor-alert/custom-checks.md new file mode 100644 index 000000000..537be9d82 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/custom-checks.md @@ -0,0 +1,96 @@ +--- +title: Create custom checks +seotitle: Custom checks +description: > + Create custom checks with a Flux task. +menu: + influxdb_2_5: + parent: Monitor & alert +weight: 201 +influxdb/v2.5/tags: [alerts, checks, tasks, Flux] +--- + +In the UI, you can create two kinds of [checks](/influxdb/v2.5/reference/glossary/#check): +[`threshold`](/influxdb/v2.5/monitor-alert/checks/create/#threshold-check) and +[`deadman`](/influxdb/v2.5/monitor-alert/checks/create/#deadman-check). + +Using a Flux task, you can create a custom check that provides a couple advantages: + +- Customize and transform the data you would like to use for the check. +- Set up custom criteria for your alert (other than `threshold` and `deadman`). + +## Create a task + +1. In the InfluxDB UI, select **Tasks** in the navigation menu on the left. + + {{< nav-icon "tasks" >}} + +2. Click **{{< caps >}}{{< icon "plus" >}} Create Task{{< /caps >}}**. +3. 
In the **Name** field, enter a descriptive name, + and then enter how often to run the task in the **Every** field (for example, `10m`). + For more detail, such as using cron syntax or including an offset, see [Task configuration options](/influxdb/v2.5/process-data/task-options/). +4. Enter the Flux script for your custom check, including the [`monitor.check`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/monitor/check/) function. + +{{% note %}} +Use the [`/api/v2/checks/{checkID}/query` API endpoint](/influxdb/v2.5/api/#operation/DeleteDashboardsIDOwnersID) +to see the Flux code for a check built in the UI. +This can be useful for constructing custom checks. +{{% /note %}} + +### Example: Monitor failed tasks + +The script below is fairly complex, and can be used as a framework for similar tasks. +It does the following: + +- Import the necessary `influxdata/influxdb/monitor` package, and other packages for data processing. +- Query the `_tasks` bucket to retrieve all statuses generated by your check. +- Set the `_level` to alert on, for example, `crit`, `warn`, `info`, or `ok`. +- Create a `check` object that specifies an ID, name, and type for the check. +- Define the `ok` and `crit` statuses. +- Execute the `monitor` function on the `check` using the `task_data`. + +#### Example alert task script + +```js +import "strings" +import "regexp" +import "influxdata/influxdb/monitor" +import "influxdata/influxdb/schema" + +option task = {name: "Failed Tasks Check", every: 1h, offset: 4m} + +task_data = from(bucket: "_tasks") + |> range(start: -task.every) + |> filter(fn: (r) => r["_measurement"] == "runs") + |> filter(fn: (r) => r["_field"] == "logs") + |> map(fn: (r) => ({r with name: strings.split(v: regexp.findString(r: /option task = \{([^\}]+)/, v: r._value), t: "\\\\\\\"")[1]})) + |> drop(columns: ["_value", "_start", "_stop"]) + |> group(columns: ["name", "taskID", "status", "_measurement"]) + |> map(fn: (r) => ({r with _value: if r.status == "failed" then 1 else 0})) + |> last() + +check = { + // 16 characters, alphanumeric + _check_id: "0000000000000001", + // Name string + _check_name: "Failed Tasks Check", + // Check type (threshold, deadman, or custom) + _type: "custom", + tags: {}, +} +ok = (r) => r["logs"] == 0 +crit = (r) => r["logs"] == 1 +messageFn = (r) => "The task: ${r.taskID} - ${r.name} has a status of ${r.status}" + +task_data + |> schema["fieldsAsCols"]() + |> monitor["check"](data: check, messageFn: messageFn, ok: ok, crit: crit) +``` + +{{% note %}} +Creating a custom check does not send a notification email. +For information on how to create notification emails, see +[Create notification endpoints](/influxdb/v2.5/monitor-alert/notification-endpoints/create), +[Create notification rules](/influxdb/v2.5/monitor-alert/notification-rules/create), +and [Send alert email](/influxdb/v2.5/monitor-alert/send-email/) +{{% /note %}} diff --git a/content/influxdb/v2.5/monitor-alert/notification-endpoints/_index.md b/content/influxdb/v2.5/monitor-alert/notification-endpoints/_index.md new file mode 100644 index 000000000..3487bd786 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-endpoints/_index.md @@ -0,0 +1,19 @@ +--- +title: Manage notification endpoints +list_title: Manage notification endpoints +description: > + Create, read, update, and delete endpoints in the InfluxDB UI. 
+influxdb/v2.5/tags: [monitor, endpoints, notifications, alert] +menu: + influxdb_2_5: + parent: Monitor & alert +weight: 102 +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-rules/ +--- + +Notification endpoints store information to connect to a third-party service. +Create a connection to a HTTP, Slack, or PagerDuty endpoint. + +{{< children >}} diff --git a/content/influxdb/v2.5/monitor-alert/notification-endpoints/create.md b/content/influxdb/v2.5/monitor-alert/notification-endpoints/create.md new file mode 100644 index 000000000..7e5657367 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-endpoints/create.md @@ -0,0 +1,67 @@ +--- +title: Create notification endpoints +description: > + Create notification endpoints to send alerts on your time series data. +menu: + influxdb_2_5: + name: Create endpoints + parent: Manage notification endpoints +weight: 201 +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-rules/ +--- + +To send notifications about changes in your data, start by creating a notification endpoint to a third-party service. After creating notification endpoints, [create notification rules](/influxdb/v2.5/monitor-alert/notification-rules/create) to send alerts to third-party services on [check statuses](/influxdb/v2.5/monitor-alert/checks/create). + +{{% cloud-only %}} + +#### Endpoints available in InfluxDB Cloud +The following endpoints are available for the InfluxDB Cloud Free Plan and Usage-based Plan: + +| Endpoint | Free Plan | Usage-based Plan | +|:-------- |:-------------------: |:----------------------------:| +| **Slack** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| **PagerDuty** | | **{{< icon "check" >}}** | +| **HTTP** | | **{{< icon "check" >}}** | + +{{% /cloud-only %}} + +## Create a notification endpoint + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Endpoints{{< /caps >}}**. +3. Click **{{< caps >}}{{< icon "plus" >}} Create{{< /caps >}}**. +4. From the **Destination** drop-down list, select a destination endpoint to send notifications to. + {{% cloud-only %}}_See [available endpoints](#endpoints-available-in-influxdb-cloud)._{{% /cloud-only %}} +5. In the **Name** and **Description** fields, enter a name and description for the endpoint. +6. Enter information to connect to the endpoint: + + - **For HTTP**, enter the **URL** to send the notification. + Select the **auth method** to use: **None** for no authentication. + To authenticate with a username and password, select **Basic** and then + enter credentials in the **Username** and **Password** fields. + To authenticate with an API token, select **Bearer**, and then enter the + API token in the **Token** field. + + - **For Slack**, create an [Incoming WebHook](https://api.slack.com/incoming-webhooks#posting_with_webhooks) + in Slack, and then enter your webHook URL in the **Slack Incoming WebHook URL** field. + + - **For PagerDuty**: + - [Create a new service](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-new-service), + [add an integration for your service](https://support.pagerduty.com/docs/services-and-integrations#section-add-integrations-to-an-existing-service), + and then enter the PagerDuty integration key for your new service in the **Routing Key** field. + - The **Client URL** provides a useful link in your PagerDuty notification. 
+ Enter any URL that you'd like to use to investigate issues. + This URL is sent as the `client_url` property in the PagerDuty trigger event. + By default, the **Client URL** is set to your Monitoring & Alerting History + page, and the following included in the PagerDuty trigger event: + + ```json + "client_url": "http://localhost:8086/orgs//alert-history" + ``` + +6. Click **{{< caps >}}Create Notification Endpoint{{< /caps >}}**. diff --git a/content/influxdb/v2.5/monitor-alert/notification-endpoints/delete.md b/content/influxdb/v2.5/monitor-alert/notification-endpoints/delete.md new file mode 100644 index 000000000..a3e744aee --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-endpoints/delete.md @@ -0,0 +1,28 @@ +--- +title: Delete notification endpoints +description: > + Delete a notification endpoint in the InfluxDB UI. +menu: + influxdb_2_5: + name: Delete endpoints + parent: Manage notification endpoints +weight: 204 +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-rules/ +--- + +If notifications are no longer sent to an endpoint, complete the steps below to +delete the endpoint, and then [update notification rules](/influxdb/v2.5/monitor-alert/notification-rules/update) +with a new notification endpoint as needed. + +## Delete a notification endpoint + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Endpoints{{< /caps >}}** and find the rule + you want to delete. +3. Click the **{{< icon "trash" >}}** icon on the notification you want to delete + and then click **{{< caps >}}Confirm{{< /caps >}}**. diff --git a/content/influxdb/v2.5/monitor-alert/notification-endpoints/update.md b/content/influxdb/v2.5/monitor-alert/notification-endpoints/update.md new file mode 100644 index 000000000..d8c30b34b --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-endpoints/update.md @@ -0,0 +1,55 @@ +--- +title: Update notification endpoints +description: > + Update notification endpoints in the InfluxDB UI. +menu: + influxdb_2_5: + name: Update endpoints + parent: Manage notification endpoints +weight: 203 +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-rules/ +--- + +Complete the following steps to update notification endpoint details. +To update the notification endpoint selected for a notification rule, see [update notification rules](/influxdb/v2.5/monitor-alert/notification-rules/update/). + +**To update a notification endpoint** + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Endpoints{{< /caps >}}** and then do the following as needed: + + - [Update the name or description for notification endpoint](#update-the-name-or-description-for-notification-endpoint) + - [Change endpoint details](#change-endpoint-details) + - [Disable notification endpoint](#disable-notification-endpoint) + - [Add a label to notification endpoint](#add-a-label-to-notification-endpoint) + +## Update the name or description for notification endpoint +1. Hover over the name or description of the endpoint and click the pencil icon + (**{{< icon "edit" >}}**) to edit the field. +2. Click outside of the field to save your changes. + +## Change endpoint details +1. Click the name of the endpoint to update. +2. Update details as needed, and then click **Edit Notification Endpoint**. 
+ For details about each field, see [Create notification endpoints](/influxdb/v2.5/monitor-alert/notification-endpoints/create/). + +## Disable notification endpoint +Click the {{< icon "toggle" >}} toggle to disable the notification endpoint. + +## Add a label to notification endpoint +1. Click **{{< icon "add-label" >}} Add a label** next to the endpoint you want to add a label to. + The **Add Labels** box opens. +2. To add an existing label, select the label from the list. +3. To create and add a new label: + + - In the search field, enter the name of the new label. The **Create Label** box opens. + - In the **Description** field, enter an optional description for the label. + - Select a color for the label. + - Click **{{< caps >}}Create Label{{< /caps >}}**. + +4. To remove a label, click **{{< icon "x" >}}** on the label. diff --git a/content/influxdb/v2.5/monitor-alert/notification-endpoints/view.md b/content/influxdb/v2.5/monitor-alert/notification-endpoints/view.md new file mode 100644 index 000000000..1d54e14d1 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-endpoints/view.md @@ -0,0 +1,40 @@ +--- +title: View notification endpoint history +seotitle: View notification endpoint details and history +description: > + View notification endpoint details and history in the InfluxDB UI. +menu: + influxdb_2_5: + name: View endpoint history + parent: Manage notification endpoints +weight: 202 +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-rules/ +--- + +View notification endpoint details and history in the InfluxDB user interface (UI). + + +1. In the navigation menu on the left, select **Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Endpoints{{< /caps >}}**. + + - [View notification endpoint details](#view-notification-endpoint-details) + - [View history notification endpoint history](#view-notification-endpoint-history), including statues and notifications sent to the endpoint + +## View notification endpoint details +On the notification endpoints page: + +1. Click the name of the notification endpoint you want to view. +2. View the notification endpoint destination, name, and information to connect to the endpoint. + +## View notification endpoint history +On the notification endpoints page, click the **{{< icon "gear" >}}** icon, +and then click **View History**. +The Check Statuses History page displays: + +- Statuses generated for the selected notification endpoint +- Notifications sent to the selected notification endpoint diff --git a/content/influxdb/v2.5/monitor-alert/notification-rules/_index.md b/content/influxdb/v2.5/monitor-alert/notification-rules/_index.md new file mode 100644 index 000000000..ee3377d67 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-rules/_index.md @@ -0,0 +1,17 @@ +--- +title: Manage notification rules +description: > + Manage notification rules in InfluxDB. 
+weight: 103 +influxdb/v2.5/tags: [monitor, notifications, alert] +menu: + influxdb_2_5: + parent: Monitor & alert +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +The following articles provide information on managing your notification rules: + +{{< children >}} diff --git a/content/influxdb/v2.5/monitor-alert/notification-rules/create.md b/content/influxdb/v2.5/monitor-alert/notification-rules/create.md new file mode 100644 index 000000000..b7f99c338 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-rules/create.md @@ -0,0 +1,44 @@ +--- +title: Create notification rules +description: > + Create notification rules to send alerts on your time series data. +weight: 201 +menu: + influxdb_2_5: + parent: Manage notification rules +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +Once you've set up checks and notification endpoints, create notification rules to alert you. +_For details, see [Manage checks](/influxdb/v2.5/monitor-alert/checks/) and +[Manage notification endpoints](/influxdb/v2.5/monitor-alert/notification-endpoints/)._ + + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Rules{{< /caps >}}** near to top of the page. + + - [Create a new notification rule in the UI](#create-a-new-notification-rule-in-the-ui) + - [Clone an existing notification rule in the UI](#clone-an-existing-notification-rule-in-the-ui) + +## Create a new notification rule + +1. On the notification rules page, click **{{< caps >}}{{< icon "plus" >}} Create{{< /caps >}}**. +2. Complete the **About** section: + 1. In the **Name** field, enter a name for the notification rule. + 2. In the **Schedule Every** field, enter how frequently the rule should run. + 3. In the **Offset** field, enter an offset time. For example,if a task runs on the hour, a 10m offset delays the task to 10 minutes after the hour. Time ranges defined in the task are relative to the specified execution time. +3. In the **Conditions** section, build a condition using a combination of status and tag keys. + - Next to **When status is equal to**, select a status from the drop-down field. + - Next to **AND When**, enter one or more tag key-value pairs to filter by. +4. In the **Message** section, select an endpoint to notify. +5. Click **{{< caps >}}Create Notification Rule{{< /caps >}}**. + +## Clone an existing notification rule + +On the notification rules page, click the **{{< icon "gear" >}}** icon and select **Clone**. +The cloned rule appears. diff --git a/content/influxdb/v2.5/monitor-alert/notification-rules/delete.md b/content/influxdb/v2.5/monitor-alert/notification-rules/delete.md new file mode 100644 index 000000000..771ea9e46 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-rules/delete.md @@ -0,0 +1,24 @@ +--- +title: Delete notification rules +description: > + If you no longer need to receive an alert, delete the associated notification rule. +weight: 204 +menu: + influxdb_2_5: + parent: Manage notification rules +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +If you no longer need to receive an alert, delete the associated notification rule. + +## Delete a notification rule + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. 
Select **{{< caps >}}Notification Rules{{< /caps >}}** near to top of the page. +3. Click the **{{< icon "trash" >}}** icon on the notification rule you want to delete. +4. Click **{{< caps >}}Confirm{{< /caps >}}**. diff --git a/content/influxdb/v2.5/monitor-alert/notification-rules/update.md b/content/influxdb/v2.5/monitor-alert/notification-rules/update.md new file mode 100644 index 000000000..29f93fbbc --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-rules/update.md @@ -0,0 +1,50 @@ +--- +title: Update notification rules +description: > + Update notification rules to update the notification message or change the schedule or conditions. +weight: 203 +menu: + influxdb_2_5: + parent: Manage notification rules +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +Update notification rules to update the notification message or change the schedule or conditions. + + +1. In the navigation menu on the left, select **Alerts > Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Rules{{< /caps >}}** near to top of the page. + +- [Update the name or description for notification rules](#update-the-name-or-description-for-notification-rules) +- [Enable or disable notification rules](#enable-or-disable-notification-rules) +- [Add a label to notification rules](#add-a-label-to-notification-rules) + +## Update the name or description for notification rules +On the Notification Rules page: + +1. Hover over the name or description of a rule and click the pencil icon + (**{{< icon "edit" >}}**) to edit the field. +2. Click outside of the field to save your changes. + +## Enable or disable notification rules +On the notification rules page, click the {{< icon "toggle" >}} toggle to +enable or disable the notification rule. + +## Add a label to notification rules +On the notification rules page: + +1. Click **{{< icon "add-label" >}} Add a label** + next to the rule you want to add a label to. + The **Add Labels** box opens. +2. To add an existing label, select the label from the list. +3. To create and add a new label: + - In the search field, enter the name of the new label. The **Create Label** box opens. + - In the **Description** field, enter an optional description for the label. + - Select a color for the label. + - Click **{{< caps >}}Create Label{{< /caps >}}**. +4. To remove a label, click **{{< icon "x" >}}** on the label. diff --git a/content/influxdb/v2.5/monitor-alert/notification-rules/view.md b/content/influxdb/v2.5/monitor-alert/notification-rules/view.md new file mode 100644 index 000000000..35c3253ab --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/notification-rules/view.md @@ -0,0 +1,44 @@ +--- +title: View notification rules +description: > + Update notification rules to update the notification message or change the schedule or conditions. +weight: 202 +menu: + influxdb_2_5: + parent: Manage notification rules +related: + - /influxdb/v2.5/monitor-alert/checks/ + - /influxdb/v2.5/monitor-alert/notification-endpoints/ +--- + +View notification rule details and statuses and notifications generated by notification rules in the InfluxDB user interface (UI). 
+ +- [View a list of all notification rules](#view-a-list-of-all-notification-rules) +- [View notification rule details](#view-notification-rule-details) +- [View statuses generated by a check](#view-statuses-generated-by-a-notification-rule) +- [View notifications triggered by a notification rule](#view-notifications-triggered-by-a-notification-rule) + +**To view notification rules:** + +1. In the navigation menu on the left, select **Alerts**. + + {{< nav-icon "alerts" >}} + +2. Select **{{< caps >}}Notification Rules{{< /caps >}}** near to top of the page. + +## View a list of all notification rules +The **{{< caps >}}Notification Rules{{< /caps >}}** section of the Alerts landing page displays all existing checks. + +## View notification rule details +Click the name of the check you want to view. +The check builder appears. +Here you can view the check query and logic. + +## View statuses generated by a notification rule +Click the **{{< icon "gear" >}}** icon on the notification rule, and then **View History**. +The Statuses History page displays statuses generated by the selected check. + +## View notifications triggered by a notification rule +1. Click the **{{< icon "gear" >}}** icon on the notification rule, and then **View History**. +2. In the top left corner, click **{{< caps >}}Notifications{{< /caps >}}**. + The Notifications History page displays notifications initiated by the selected notification rule. diff --git a/content/influxdb/v2.5/monitor-alert/send-email.md b/content/influxdb/v2.5/monitor-alert/send-email.md new file mode 100644 index 000000000..5ae69ec6c --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/send-email.md @@ -0,0 +1,295 @@ +--- +title: Send alert email +description: > + Send an alert email. +menu: + influxdb_2_5: + parent: Monitor & alert +weight: 104 +influxdb/v2.5/tags: [alert, email, notifications, check] +related: + - /influxdb/v2.5/monitor-alert/checks/ +--- + +Send an alert email using a third-party service, such as [SendGrid](https://sendgrid.com/), [Amazon Simple Email Service (SES)](https://aws.amazon.com/ses/), [Mailjet](https://www.mailjet.com/), or [Mailgun](https://www.mailgun.com/). To send an alert email, complete the following steps: + +1. [Create a check](/influxdb/v2.5/monitor-alert/checks/create/#create-a-check-in-the-influxdb-ui) to identify the data to monitor and the status to alert on. +2. Set up your preferred email service (sign up, retrieve API credentials, and send test email): + - **SendGrid**: See [Getting Started With the SendGrid API](https://sendgrid.com/docs/API_Reference/api_getting_started.html) + - **AWS Simple Email Service (SES)**: See [Using the Amazon SES API](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email.html). Your AWS SES request, including the `url` (endpoint), authentication, and the structure of the request may vary. For more information, see [Amazon SES API requests](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-ses-api-requests.html) and [Authenticating requests to the Amazon SES API](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-ses-api-authentication.html). + - **Mailjet**: See [Getting Started with Mailjet](https://dev.mailjet.com/email/guides/getting-started/) + - **Mailgun**: See [Mailgun Signup](https://signup.mailgun.com/new/signup) +3. [Create an alert email task](#create-an-alert-email-task) to call your email service and send an alert email. 
+ + {{% note %}} + In the procedure below, we use the **Task** page in the InfluxDB UI (user interface) to create a task. Explore other ways to [create a task](/influxdb/v2.5/process-data/manage-tasks/create-task/). + {{% /note %}} + +### Create an alert email task + +1. In the InfluxDB UI, select **Tasks** in the navigation menu on the left. + + {{< nav-icon "tasks" >}} + +2. Click **{{< caps >}}{{< icon "plus" >}} Create Task{{< /caps >}}**. +3. In the **Name** field, enter a descriptive name, for example, **Send alert email**, + and then enter how often to run the task in the **Every** field, for example, `10m`. + For more detail, such as using cron syntax or including an offset, see [Task configuration options](/influxdb/v2.5/process-data/task-options/). + +4. In the right panel, enter the following detail in your **task script** (see [examples below](#examples)): + - Import the [Flux HTTP package](/{{< latest "flux" >}}/stdlib/http/). + - (Optional) Store your API key as a secret for reuse. + First, [add your API key as a secret](/influxdb/v2.5/security/secrets/manage-secrets/add/), + and then import the [Flux InfluxDB Secrets package](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/secrets/). + - Query the `statuses` measurement in the `_monitoring` bucket to retrieve all statuses generated by your check. + - Set the time range to monitor; use the same interval that the task is scheduled to run. For example, `range (start: -task.every)`. + - Set the `_level` to alert on, for example, `crit`, `warn`, `info`, or `ok`. + - Use the `map()` function to evaluate the criteria to send an alert using `http.post()`. + - Specify your email service `url` (endpoint), include applicable request `headers`, and verify your request `data` format follows the format specified for your email service. + +#### Examples + +{{< tabs-wrapper >}} +{{% tabs %}} +[SendGrid](#) +[AWS SES](#) +[Mailjet](#) +[Mailgun](#) +{{% /tabs %}} + + +{{% tab-content %}} + +The example below uses the SendGrid API to send an alert email when more than 3 critical statuses occur since the previous task run. + +```js +import "http" +import "json" +// Import the Secrets package if you store your API key as a secret. +// For detail on how to do this, see Step 4 above. +import "influxdata/influxdb/secrets" + +// Retrieve the secret if applicable. Otherwise, skip this line +// and add the API key as the Bearer token in the Authorization header. +SENDGRID_APIKEY = secrets.get(key: "SENDGRID_APIKEY") + +numberOfCrits = from(bucket: "_monitoring") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "statuses" and r._level == "crit") + |> count() + +numberOfCrits + |> map( + fn: (r) => if r._value > 3 then + {r with _value: http.post( + url: "https://api.sendgrid.com/v3/mail/send", + headers: {"Content-Type": "application/json", "Authorization": "Bearer ${SENDGRID_APIKEY}"}, + data: json.encode( + v: { + "personalizations": [ + { + "to": [ + { + "email": "jane.doe@example.com" + } + ] + } + ], + "from": { + "email": "john.doe@example.com" + }, + "subject": "InfluxDB critical alert", + "content": [ + { + "type": "text/plain", + "value": "There have been ${r._value} critical statuses." + } + ] + } + ) + )} + else + {r with _value: 0}, + ) +``` + +{{% /tab-content %}} + + +{{% tab-content %}} + +The example below uses the AWS SES API v2 to send an alert email when more than 3 critical statuses occur since the last task run. 
+ +{{% note %}} +Your AWS SES request, including the `url` (endpoint), authentication, and the structure of the request may vary. For more information, see [Amazon SES API requests](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-ses-api-requests.html) and [Authenticating requests to the Amazon SES API](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-ses-api-authentication.html). We recommend signing your AWS API requests using the [Signature Version 4 signing process](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html). +{{% /note %}} + +```js +import "http" +import "json" +// Import the Secrets package if you store your API credentials as secrets. +// For detail on how to do this, see Step 4 above. +import "influxdata/influxdb/secrets" + +// Retrieve the secrets if applicable. Otherwise, skip this line +// and add the API key as the Bearer token in the Authorization header. +AWS_AUTH_ALGORITHM = secrets.get(key: "AWS_AUTH_ALGORITHM") +AWS_CREDENTIAL = secrets.get(key: "AWS_CREDENTIAL") +AWS_SIGNED_HEADERS = secrets.get(key: "AWS_SIGNED_HEADERS") +AWS_CALCULATED_SIGNATURE = secrets.get(key: "AWS_CALCULATED_SIGNATURE") + +numberOfCrits = from(bucket: "_monitoring") + |> range(start: -task.every) + |> filter(fn: (r) => r.measurement == "statuses" and r._level == "crit") + |> count() + +numberOfCrits + |> map( + fn: (r) => if r._value > 3 then + {r with _value: http.post( + url: "https://email.your-aws-region.amazonaws.com/sendemail/v2/email/outbound-emails", + headers: { + "Content-Type": "application/json", + "Authorization": "Bearer ${AWS_AUTH_ALGORITHM}${AWS_CREDENTIAL}${AWS_SIGNED_HEADERS}${AWS_CALCULATED_SIGNATURE}"}, + data: json.encode(v: { + "Content": { + "Simple": { + "Body": { + "Text": { + "Charset": "UTF-8", + "Data": "There have been ${r._value} critical statuses." + } + }, + "Subject": { + "Charset": "UTF-8", + "Data": "InfluxDB critical alert" + } + } + }, + "Destination": { + "ToAddresses": [ + "john.doe@example.com" + ] + } + } + ) + )} + else + {r with _value: 0}, + ) +``` + +For details on the request syntax, see [SendEmail API v2 reference](https://docs.aws.amazon.com/ses/latest/APIReference-V2/API_SendEmail.html). + +{{% /tab-content %}} + + +{{% tab-content %}} + +The example below uses the Mailjet Send API to send an alert email when more than 3 critical statuses occur since the last task run. + +{{% note %}} +To view your Mailjet API credentials, sign in to Mailjet and open the [API Key Management page](https://app.mailjet.com/account/api_keys). +{{% /note %}} + +```js +import "http" +import "json" +// Import the Secrets package if you store your API keys as secrets. +// For detail on how to do this, see Step 4 above. +import "influxdata/influxdb/secrets" + +// Retrieve the secrets if applicable. Otherwise, skip this line +// and add the API keys as Basic credentials in the Authorization header. 
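+// Note: Mailjet's Send API expects HTTP Basic authentication with the API
+// key as the username and the secret API key as the password; both values
+// are assumed to be stored as InfluxDB secrets in this example.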
+MAILJET_APIKEY = secrets.get(key: "MAILJET_APIKEY")
+MAILJET_SECRET_APIKEY = secrets.get(key: "MAILJET_SECRET_APIKEY")
+
+numberOfCrits = from(bucket: "_monitoring")
+    |> range(start: -task.every)
+    |> filter(fn: (r) => r._measurement == "statuses" and r._level == "crit")
+    |> count()
+
+numberOfCrits
+    |> map(
+        fn: (r) => if r._value > 3 then
+            {r with
+                _value: http.post(
+                    url: "https://api.mailjet.com/v3.1/send",
+                    headers: {
+                        "Content-type": "application/json",
+                        "Authorization": "Basic ${MAILJET_APIKEY}:${MAILJET_SECRET_APIKEY}"
+                    },
+                    data: json.encode(
+                        v: {
+                            "Messages": [
+                                {
+                                    "From": {"Email": "jane.doe@example.com"},
+                                    "To": [{"Email": "john.doe@example.com"}],
+                                    "Subject": "InfluxDB critical alert",
+                                    "TextPart": "There have been ${r._value} critical statuses.",
+                                    "HTMLPart": "<h3>${r._value} critical statuses</h3><br/>
There have been ${r._value} critical statuses.", + }, + ], + }, + ), + ), + } + else + {r with _value: 0}, + ) +``` + +{{% /tab-content %}} + + + +{{% tab-content %}} + +The example below uses the Mailgun API to send an alert email when more than 3 critical statuses occur since the last task run. + +{{% note %}} +To view your Mailgun API keys, sign in to Mailjet and open [Account Security - API security](https://app.mailgun.com/app/account/security/api_keys). Mailgun requires that a domain be specified via Mailgun. A domain is automatically created for you when you first set up your account. You must include this domain in your `url` endpoint (for example, `https://api.mailgun.net/v3/YOUR_DOMAIN` or `https://api.eu.mailgun.net/v3/YOUR_DOMAIN`. If you're using a free version of Mailgun, you can set up a maximum of five authorized recipients (to receive email alerts) for your domain. To view your Mailgun domains, sign in to Mailgun and view the [Domains page](https://app.mailgun.com/app/sending/domains). +{{% /note %}} + +```js +import "http" +import "json" +// Import the Secrets package if you store your API key as a secret. +// For detail on how to do this, see Step 4 above. +import "influxdata/influxdb/secrets" + +// Retrieve the secret if applicable. Otherwise, skip this line +// and add the API key as the Bearer token in the Authorization header. +MAILGUN_APIKEY = secrets.get(key: "MAILGUN_APIKEY") + +numberOfCrits = from(bucket: "_monitoring") + |> range(start: -task.every) + |> filter(fn: (r) => r["_measurement"] == "statuses") + |> filter(fn: (r) => r["_level"] == "crit") + |> count() + +numberOfCrits + |> map( + fn: (r) => if r._value > 1 then + {r with _value: http.post( + url: "https://api.mailgun.net/v3/YOUR_DOMAIN/messages", + headers: { + "Content-type": "application/json", + "Authorization": "Basic api:${MAILGUN_APIKEY}" + }, + data: json.encode(v: { + "from": "Username ", + "to": "email@example.com", + "subject": "InfluxDB critical alert", + "text": "There have been ${r._value} critical statuses." + } + ) + )} + else + {r with _value: 0}, + ) +``` + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} diff --git a/content/influxdb/v2.5/monitor-alert/templates/_index.md b/content/influxdb/v2.5/monitor-alert/templates/_index.md new file mode 100644 index 000000000..7ed3ce257 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/_index.md @@ -0,0 +1,14 @@ +--- +title: Monitor with templates +description: > + Use community templates to monitor data in many supported environments. Monitor infrastructure, networking, IoT, software, security, TICK stack, and more. +menu: + influxdb_2_5: + parent: Monitor & alert +weight: 104 +influxdb/v2.5/tags: [monitor, templates] +--- + +Use one of our community templates to quickly set up InfluxDB (with a bucket and dashboard) to collect, analyze, and monitor data in supported environments. + +{{< children >}} diff --git a/content/influxdb/v2.5/monitor-alert/templates/infrastructure/_index.md b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/_index.md new file mode 100644 index 000000000..5ce335ec1 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/_index.md @@ -0,0 +1,14 @@ +--- +title: Monitor infrastructure +description: > + Use one of our community templates to quickly set up InfluxDB (with a bucket and dashboard) to collect, analyze, and monitor your infrastructure. 
+menu: + influxdb_2_5: + parent: Monitor with templates +weight: 104 +influxdb/v2.5/tags: [monitor, templates, infrastructure] +--- + +Use one of our community templates to quickly set up InfluxDB (with a bucket and dashboard) to collect, analyze, and monitor your infrastructure. + +{{< children >}} diff --git a/content/influxdb/v2.5/monitor-alert/templates/infrastructure/aws.md b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/aws.md new file mode 100644 index 000000000..6658dca67 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/aws.md @@ -0,0 +1,59 @@ +--- +title: Monitor Amazon Web Services (AWS) +description: > + Use the AWS CloudWatch Monitoring template to monitor data from Amazon Web Services (AWS), Amazon Elastic Compute Cloud (EC2), and Amazon Elastic Load Balancing (ELB) with the AWS CloudWatch Service. +menu: + influxdb_2_5: + parent: Monitor infrastructure + name: AWS CloudWatch +weight: 201 +--- + +Use the [AWS CloudWatch Monitoring template](https://github.com/influxdata/community-templates/tree/master/aws_cloudwatch) to monitor data from [Amazon Web Services (AWS)](https://aws.amazon.com/), [Amazon Elastic Compute Cloud (EC2)](https://aws.amazon.com/ec2/), and [Amazon Elastic Load Balancing (ELB)](https://aws.amazon.com/elasticloadbalancing/) with the [AWS CloudWatch Service](https://aws.amazon.com/cloudwatch/). + +The AWS CloudWatch Monitoring template includes the following: + +- two [dashboards](/influxdb/v2.5/reference/glossary/#dashboard): + - **AWS CloudWatch NLB (Network Load Balancers) Monitoring**: Displays data from the `cloudwatch_aws_network_elb measurement` + - **AWS CloudWatch Instance Monitoring**: Displays data from the `cloudwatch_aws_ec2` measurement +- two [buckets](/influxdb/v2.5/reference/glossary/#bucket): `kubernetes` and `cloudwatch` +- two labels: `inputs.cloudwatch`, `AWS` +- one variable: `v.bucket` +- one [Telegraf configuration](/influxdb/v2.5/telegraf-configs/): [AWS CloudWatch input plugin](/{{< latest "telegraf" >}}/plugins//#cloudwatch) + +## Apply the template + +1. Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) to run the following command: + + ```sh + influx apply -f https://raw.githubusercontent.com/influxdata/community-templates/master/aws_cloudwatch/aws_cloudwatch.yml + ``` + For more information, see [influx apply](/influxdb/v2.5/reference/cli/influx/apply/). +2. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/) on a server with network access to both the CloudWatch API and [InfluxDB v2 API](/influxdb/v2.5/reference/api/). +3. In your Telegraf configuration file (`telegraf.conf`), find the following example `influxdb_v2` output plugins, and then **replace** the `urls` to specify the servers to monitor: + + ```sh + ## k8s + [[outputs.influxdb_v2]] + urls = ["http://influxdb.monitoring:8086"] + organization = "InfluxData" + bucket = "kubernetes" + token = "secret-token" + + ## cloudv2 sample + [[outputs.influxdb_v2]] + urls = ["$INFLUX_HOST"] + token = "$INFLUX_TOKEN" + organization = "$INFLUX_ORG" + bucket = “cloudwatch" + ``` +4. [Start Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#start-telegraf). + +## View the incoming data + +1. In the InfluxDB user interface (UI), select **Dashboards** in the left navigation. + + {{< nav-icon "dashboards" >}} + +2. Open your AWS dashboards, and then set the `v.bucket` variable to specify the + bucket to query data from (`kubernetes` or `cloudwatch`). 
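+
+If the dashboards stay empty after Telegraf starts, you can check whether metrics are reaching InfluxDB with a short Flux query in the **Data Explorer**. The following is a minimal sketch that assumes the template's `cloudwatch` bucket and the `cloudwatch_aws_ec2` measurement used by the instance monitoring dashboard:
+
+```js
+// Show a few recent rows written by the AWS CloudWatch input plugin.
+// Assumes the template's `cloudwatch` bucket and the `cloudwatch_aws_ec2`
+// measurement; widen the range if Telegraf has only just started.
+from(bucket: "cloudwatch")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "cloudwatch_aws_ec2")
+    |> limit(n: 10)
+```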
diff --git a/content/influxdb/v2.5/monitor-alert/templates/infrastructure/docker.md b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/docker.md new file mode 100644 index 000000000..19901d0a4 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/docker.md @@ -0,0 +1,57 @@ +--- +title: Monitor Docker +description: > + Use the [Docker Monitoring template](https://github.com/influxdata/community-templates/tree/master/docker) to monitor your Docker containers. +menu: + influxdb_2_5: + parent: Monitor infrastructure + name: Docker +weight: 202 +--- + +Use the [Docker Monitoring template](https://github.com/influxdata/community-templates/tree/master/docker) to monitor your Docker containers. First, [apply the template](#apply-the-template), and then [view incoming data](#view-incoming-data). +This template uses the [Docker input plugin](/{{< latest "telegraf" >}}/plugins//#docker) to collect metrics stored in InfluxDB and display these metrics in a dashboard. + +The Docker Monitoring template includes the following: + +- one [dashboard](/influxdb/v2.5/reference/glossary/#dashboard): **Docker** +- one [bucket](/influxdb/v2.5/reference/glossary/#bucket): `docker, 7d retention` +- labels: Docker input plugin labels +- one [Telegraf configuration](/influxdb/v2.5/telegraf-configs/): Docker input plugin +- one variable: `bucket` +- four [checks](/influxdb/v2.5/reference/glossary/#check): `Container cpu`, `mem`, `disk`, `non-zero exit` +- one [notification endpoint](/influxdb/v2.5/reference/glossary/#notification-endpoint): `Http Post` +- one [notification rule](/influxdb/v2.5/reference/glossary/#notification-rule): `Crit Alert` + +For more information about how checks, notification endpoints, and notifications rules work together, see [monitor data and send alerts](/influxdb/v2.5/monitor-alert/). + +## Apply the template + +1. Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) to run the following command: + + ```sh + influx apply -f https://raw.githubusercontent.com/influxdata/community-templates/master/docker/docker.yml + ``` + For more information, see [influx apply](/influxdb/v2.5/reference/cli/influx/apply/). + + {{% note %}} +Ensure your `influx` CLI is configured with your account credentials and that configuration is active. For more information, see [influx config](/influxdb/v2.5/reference/cli/influx/config/). + {{% /note %}} + +2. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/) on a server with network access to both the Docker containers and [InfluxDB v2 API](/influxdb/v2.5/reference/api/). +3. In your [Telegraf configuration file (`telegraf.conf`)](/influxdb/v2.5/telegraf-configs/), do the following: + - Depending on how you run Docker, you may need to customize the [Docker input plugin](/{{< latest "telegraf" >}}/plugins//#docker) configuration, for example, you may need to specify the `endpoint` value. + - Set the following environment variables: + - INFLUX_TOKEN: Token must have permissions to read Telegraf configurations and write data to the `telegraf` bucket. See how to [view tokens](/influxdb/v2.5/security/tokens/view-tokens/). + - INFLUX_ORG: Name of your organization. See how to [view your organization](/influxdb/v2.5/organizations/view-orgs/). + - INFLUX_HOST: Your InfluxDB host URL, for example, localhost, a remote instance, or InfluxDB Cloud. + +4. [Start Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#start-telegraf). + +## View incoming data + +1. 
In the InfluxDB user interface (UI), select **Dashboards** in the left navigation. + + {{< nav-icon "dashboards" >}} + +2. Open the **Docker** dashboard to start monitoring. diff --git a/content/influxdb/v2.5/monitor-alert/templates/infrastructure/raspberry-pi.md b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/raspberry-pi.md new file mode 100644 index 000000000..84a506110 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/raspberry-pi.md @@ -0,0 +1,62 @@ +--- +title: Monitor Raspberry Pi +description: > + Use the Raspberry Pi system template to monitor your Raspberry Pi 4 or 400 Linux system. +menu: + influxdb_2_5: + parent: Monitor infrastructure + name: Raspberry Pi +weight: 201 +--- + +Use the [Raspberry Pi Monitoring template](https://github.com/influxdata/community-templates/tree/master/raspberry-pi) +to monitor your Raspberry Pi 4 or 400 Linux system. + +The Raspberry Pi template includes the following: + +- one [bucket](/influxdb/v2.5/reference/glossary/#bucket): `rasp-pi` (7d retention) +- labels: `raspberry-pi` + Telegraf plugin labels + - [Diskio input plugin](/{{< latest "telegraf" >}}/plugins//#diskio) + - [Mem input plugin](/{{< latest "telegraf" >}}/plugins//#mem) + - [Net input plugin](/{{< latest "telegraf" >}}/plugins//#net) + - [Processes input plugin](/{{< latest "telegraf" >}}/plugins//#processes) + - [Swap input plugin](/{{< latest "telegraf" >}}/plugins//#swap) + - [System input plugin](/{{< latest "telegraf" >}}/plugins//#system) +- one [Telegraf configuration](/influxdb/v2.5/telegraf-configs/) +- one [dashboard](/influxdb/v2.5/reference/glossary/#dashboard): Raspberry Pi System +- two variables: `bucket` and `linux_host` + +## Apply the template + +1. Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) to run the following command: + + ```sh + influx apply -f https://raw.githubusercontent.com/influxdata/community-templates/master/raspberry-pi/raspberry-pi-system.yml + ``` + For more information, see [influx apply](/influxdb/v2.5/reference/cli/influx/apply/). +2. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/) on + your Raspberry Pi and ensure your Raspberry Pi has network access to the + [InfluxDB {{% cloud-only %}}Cloud{{% /cloud-only %}} API](/influxdb/v2.5/reference/api/). +3. Add the following environment variables to your Telegraf environment: + + - `INFLUX_HOST`: {{% oss-only %}}Your [InfluxDB URL](/influxdb/v2.5/reference/urls/){{% /oss-only %}} + {{% cloud-only %}}Your [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /cloud-only %}} + - `INFLUX_TOKEN`: Your [InfluxDB {{% cloud-only %}}Cloud{{% /cloud-only %}} API token](/influxdb/v2.5/security/tokens/) + - `INFLUX_ORG`: Your InfluxDB {{% cloud-only %}}Cloud{{% /cloud-only %}} organization name. + + ```sh + export INFLUX_HOST=http://localhost:8086 + export INFLUX_TOKEN=mY5uP3rS3cr3T70keN + export INFLUX_ORG=example-org + ``` + +4. [Start Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#start-telegraf). + +## View the incoming data + +1. In the InfluxDB user interface (UI), select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Click the Raspberry Pi System link to open your dashboard, then select `rasp-pi` +as your bucket and select your linux_host. 
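+
+If the dashboard doesn't show data for your Pi, you can spot-check the incoming metrics with a short Flux query. The following is a minimal sketch that assumes the template's `rasp-pi` bucket; the `mem` measurement and `used_percent` field are assumptions based on the Mem input plugin's defaults:
+
+```js
+// Return the most recent memory usage reported by each Raspberry Pi host.
+// Assumes the template's `rasp-pi` bucket; the `mem` measurement and
+// `used_percent` field come from Telegraf's Mem input plugin defaults.
+from(bucket: "rasp-pi")
+    |> range(start: -15m)
+    |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+    |> last()
+```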
diff --git a/content/influxdb/v2.5/monitor-alert/templates/infrastructure/vshpere.md b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/vshpere.md new file mode 100644 index 000000000..440fbf4d8 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/vshpere.md @@ -0,0 +1,58 @@ +--- +title: Monitor vSphere +description: > + Use the [vSphere Dashboard for InfluxDB v2 template](https://github.com/influxdata/community-templates/tree/master/vsphere) to monitor your vSphere host. +menu: + influxdb_2_5: + parent: Monitor infrastructure + name: vSphere +weight: 206 +--- + +Use the [vSphere Dashboard for InfluxDB v2 template](https://github.com/influxdata/community-templates/tree/master/vsphere) to monitor your vSphere host. First, [apply the template](#apply-the-template), and then [view incoming data](#view-incoming-data). +This template uses the [Docker input plugin](/{{< latest "telegraf" >}}/plugins//#docker) to collect metrics stored in InfluxDB and display these metrics in a dashboard. + +The Docker Monitoring template includes the following: + +- one [dashboard](/influxdb/v2.5/reference/glossary/#dashboard): **vsphere** +- one [bucket](/influxdb/v2.5/reference/glossary/#bucket): `vsphere` +- label: vsphere +- one [Telegraf configuration](/influxdb/v2.5/telegraf-configs/): InfluxDB v2 output plugin, vSphere input plugin +- one variable: `bucket` + +## Apply the template + +1. Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) to run the following command: + + ```sh + influx apply -f https://raw.githubusercontent.com/influxdata/community-templates/master/vsphere/vsphere.yml + ``` + For more information, see [influx apply](/influxdb/v2.5/reference/cli/influx/apply/). + + {{% note %}} +Ensure your `influx` CLI is configured with your account credentials and that configuration is active. For more information, see [influx config](/influxdb/v2.5/reference/cli/influx/config/). + {{% /note %}} + +2. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/) on a server with network access to both the vSphere host and [InfluxDB v2 API](/influxdb/v2.5/reference/api/). +3. In your [Telegraf configuration file (`telegraf.conf`)](/influxdb/v2.5/telegraf-configs/), do the following: + - Set the following environment variables: + - INFLUX_TOKEN: Token must have permissions to read Telegraf configurations and write data to the `telegraf` bucket. See how to [view tokens](/influxdb/v2.5/security/tokens/view-tokens/). + - INFLUX_ORG: Name of your organization. See how to [view your organization](/influxdb/v2.5/organizations/view-orgs/). + - INFLUX_HOST: Your InfluxDB host URL, for example, localhost, a remote instance, or InfluxDB Cloud. + - INFLUX_BUCKET: Bucket to store data in. To use the bucket included, you must export the variable: `export INFLUX_BUCKET=vsphere` +4. - Set the host address to the vSphere and provide the `username` and `password` as variables: + ```sh + vcenters = [ "https://$VSPHERE_HOST/sdk" ] + username = "$vsphere-user" + password = "$vsphere-password" + ``` + +4. [Start Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#start-telegraf). + +## View incoming data + +1. In the InfluxDB user interface (UI), select **Dashboards** in the left navigation. + + {{< nav-icon "dashboards" >}} + +2. Open the **vsphere** dashboard to start monitoring. 
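+
+If the **vsphere** dashboard stays empty, you can list which measurements Telegraf is actually writing before digging into individual dashboard cells. The following is a minimal sketch that assumes the template's `vsphere` bucket:
+
+```js
+// List the measurements currently stored in the vsphere bucket.
+// Assumes the template's `vsphere` bucket; run this in the Data Explorer's
+// Script Editor.
+import "influxdata/influxdb/schema"
+
+schema.measurements(bucket: "vsphere")
+```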
diff --git a/content/influxdb/v2.5/monitor-alert/templates/infrastructure/windows.md b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/windows.md new file mode 100644 index 000000000..b8b95dbc1 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/infrastructure/windows.md @@ -0,0 +1,55 @@ +--- +title: Monitor Windows +description: > + Use the [Windows System Monitoring template](https://github.com/influxdata/community-templates/tree/master/windows_system) to monitor your Windows system. +menu: + influxdb_2_5: + parent: Monitor infrastructure + name: Windows +weight: 207 +--- + +Use the [Windows System Monitoring template](https://github.com/influxdata/community-templates/tree/master/windows_system) to monitor your Windows system. First, [apply the template](#apply-the-template), and then [view incoming data](#view-incoming-data). + +The Windows System Monitoring template includes the following: + +- one [dashboard](/influxdb/v2.5/reference/glossary/#dashboard): **Windows System** +- one [bucket](/influxdb/v2.5/reference/glossary/#bucket): `telegraf`, 7d retention +- label: `Windows System Template`, Telegraf plugin labels: `outputs.influxdb_v2` +- one [Telegraf configuration](/influxdb/v2.5/telegraf-configs/): InfluxDB v2 output plugin, Windows Performance Counters input plugin +- two variables: `bucket`, `windows_host` + +## Apply the template + +1. Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) to run the following command: + + ```sh + influx apply -f https://raw.githubusercontent.com/influxdata/community-templates/master/windows_system/windows_system.yml + ``` + For more information, see [influx apply](/influxdb/v2.5/reference/cli/influx/apply/). + + {{% note %}} +Ensure your `influx` CLI is configured with your account credentials and that configuration is active. For more information, see [influx config](/influxdb/v2.5/reference/cli/influx/config/). + {{% /note %}} + +2. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/) on a server with network access to both the Windows system and [InfluxDB v2 API](/influxdb/v2.5/reference/api/). +3. In your [Telegraf configuration file (`telegraf.conf`)](/influxdb/v2.5/telegraf-configs/), do the following: + - Set the following environment variables: + - INFLUX_TOKEN: Token must have permissions to read Telegraf configurations and write data to the `telegraf` bucket. See how to [view tokens](/influxdb/v2.5/security/tokens/view-tokens/). + - INFLUX_ORG: Name of your organization. See how to [view your organization](/influxdb/v2.5/organizations/view-orgs/). + - INFLUX_URL: Your InfluxDB host URL, for example, localhost, a remote instance, or InfluxDB Cloud. + +4. [Start Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#start-telegraf). +5. To monitor multiple Windows systems, repeat steps 1-4 for each system. + +## View incoming data + +1. In the InfluxDB user interface (UI), select **Dashboards** in the left navigation. + + {{< nav-icon "dashboards" >}} + +2. Open the **Windows System** dashboard to start monitoring. + + {{% note %}} + If you're monitoring multiple Windows machines, switch between them using the `windows_host` filter at the top of the dashboard. 
+ {{% /note %}} diff --git a/content/influxdb/v2.5/monitor-alert/templates/monitor.md b/content/influxdb/v2.5/monitor-alert/templates/monitor.md new file mode 100644 index 000000000..c90736ce7 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/monitor.md @@ -0,0 +1,175 @@ +--- +title: Monitor InfluxDB OSS using a template +description: > + Monitor your InfluxDB OSS instance using InfluxDB Cloud and + a pre-built InfluxDB template. +menu: + influxdb_2_5: + parent: Monitor with templates + name: Monitor InfluxDB OSS +weight: 102 +influxdb/v2.5/tags: [templates, monitor] +aliases: + - /influxdb/v2.5/influxdb-templates/monitor/ +related: + - /influxdb/v2.5/reference/cli/influx/apply/ + - /influxdb/v2.5/reference/cli/influx/template/ +--- + +Use [InfluxDB Cloud](/influxdb/cloud/), the [InfluxDB Open Source (OSS) Metrics template](https://github.com/influxdata/community-templates/tree/master/influxdb2_oss_metrics), +and [Telegraf](/{{< latest "telegraf" >}}/) to monitor one or more InfluxDB OSS instances. + +Do the following: + +1. [Review requirements](#review-requirements) +2. [Install the InfluxDB OSS Monitoring template](#install-the-influxdb-oss-monitoring-template) +3. [Set up InfluxDB OSS for monitoring](#set-up-influxdb-oss-for-monitoring) +4. [Set up Telegraf](#set-up-telegraf) +5. [View the Monitoring dashboard](#view-the-monitoring-dashboard) +6. (Optional) [Alert when metrics stop reporting](#alert-when-metrics-stop-reporting) +7. (Optional) [Create a notification endpoint and rule](#create-a-notification-endpoint-and-rule) + +## Review requirements + +Before you begin, make sure you have access to the following: + +- InfluxDB Cloud account ([sign up for free here](https://cloud2.influxdata.com/signup)) +- Command line access to a machine [running InfluxDB OSS 2.x](/influxdb/v2.5/install/) and permissions to install Telegraf on this machine +- Internet connectivity from the machine running InfluxDB OSS 2.x and Telegraf to InfluxDB Cloud +- Sufficient resource availability to install the template (InfluxDB Cloud Free + Plan accounts include [resource limits](/influxdb/cloud/account-management/pricing-plans/#resource-limits)) + +## Install the InfluxDB OSS Monitoring template + +The InfluxDB OSS Monitoring template includes a Telegraf configuration that sends +InfluxDB OSS metrics to an InfluxDB endpoint and a dashboard that visualizes the metrics. + +1. [Log into your InfluxDB Cloud account](https://cloud2.influxdata.com/). +2. Go to **Settings > Templates** in the navigation bar on the left. + + {{< nav-icon "settings" >}} + +3. Under **Paste the URL of the Template's resource manifest file**, enter the + following template URL: + + ``` + https://raw.githubusercontent.com/influxdata/community-templates/master/influxdb2_oss_metrics/influxdb2_oss_metrics.yml + ``` + +4. Click **{{< caps >}}Lookup Template{{< /caps >}}**, and then click **{{< caps >}}Install Template{{< /caps >}}**. + InfluxDB Cloud imports the template, which includes the following resources: + + - Dashboard `InfluxDB OSS Metrics` + - Telegraf configuration `scrape-influxdb-oss-telegraf` + - Bucket `oss_metrics` + - Check `InfluxDB OSS Deadman` + - Labels `influxdb2` and `prometheus` + +## Set up InfluxDB OSS for monitoring + +By default, InfluxDB OSS 2.x has a `/metrics` endpoint available, which exports +internal InfluxDB metrics in [Prometheus format](https://prometheus.io/docs/concepts/data_model/). + +1. 
Ensure the `/metrics` endpoint is [enabled](/{{< latest "influxdb" >}}/reference/config-options/#metrics-disabled). + If you've changed the default settings to disable the `/metrics` endpoint, + [re-enable these settings](/{{< latest "influxdb" >}}/reference/config-options/#metrics-disabled). +2. Navigate to the `/metrics` endpoint of your InfluxDB OSS instance to view the InfluxDB OSS system metrics in your browser. + +## Set up Telegraf + +Set up Telegraf to scrape metrics from InfluxDB OSS to send to your InfluxDB Cloud account. + +On each InfluxDB OSS instance you want to monitor, do the following: + +1. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/). +2. Set the following environment variables in your Telegraf environment: + + - `INFLUX_URL`: Your [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/) + - `INFLUX_ORG`: Your InfluxDB Cloud organization name + +3. [In the InfluxDB Cloud UI](https://cloud2.influxdata.com/), go to **Load Data > Telegraf** in the left navigation. + + {{< nav-icon "load-data" >}} + +4. Click **Setup Instructions** under **Scrape InfluxDB OSS Metrics**. +5. Complete the Telegraf Setup instructions to start Telegraf using the Scrape InfluxDB OSS Metrics + Telegraf configuration stored in InfluxDB Cloud. + + {{% note %}} +For your API token, generate a new token or use an existing all-access token. If you run Telegraf as a service, edit your init script to set the environment variable and ensure it's available to the service. + {{% /note %}} + +Telegraf runs quietly in the background (no immediate output appears), and begins +pushing metrics to the `oss_metrics` bucket in your InfluxDB Cloud account. + +## View the Monitoring dashboard + +To see your data in real time, view the Monitoring dashboard. + +1. Select **Dashboards** in your **InfluxDB Cloud** account. + + {{< nav-icon "dashboards" >}} + +2. Click **InfluxDB OSS Metrics**. Metrics appear in your dashboard. +3. Customize your monitoring dashboard as needed. For example, send an alert in the following cases: + - Users create a new task or bucket + - You're testing machine limits + - [Metrics stop reporting](#alert-when-metrics-stop-reporting) + +## Alert when metrics stop reporting + +The Monitoring template includes a [deadman check](/influxdb/cloud/monitor-alert/checks/create/#deadman-check) to verify metrics are reported at regular intervals. + +To alert when data stops flowing from InfluxDB OSS instances to your InfluxDB Cloud account, do the following: + +1. [Customize the deadman check](#customize-the-deadman-check) to identify the fields you want to monitor. +2. [Create a notification endpoint and rule](#create-a-notification-endpoint-and-rule) to receive notifications when your deadman check is triggered. + +### Customize the deadman check + +1. To view the deadman check, click **Alerts** in the navigation bar of your **InfluxDB Cloud** account. + + {{< nav-icon "alerts" >}} + +2. Choose an InfluxDB OSS field or create a new OSS field for your deadman alert: + 1. Click **{{< caps >}}{{< icon "plus" >}} Create{{< /caps >}}** and select **Deadman Check** in the dropdown menu. + 2. Define your query with at least one field. + 3. Click **{{< caps >}}Submit{{< /caps >}}** and **{{< caps >}}Configure Check{{< /caps >}}**. + When metrics stop reporting, you'll receive an alert. +3. Under **Schedule Every**, set the amount of time to check for data. +4. Set the amount of time to wait before switching to a critical alert. +5. 
Click **{{< icon "check" >}}** to save the check. + +## Create a notification endpoint and rule + +To receive a notification message when your deadman check is triggered, create a [notification endpoint](#create-a-notification-endpoint) and [rule](#create-a-notification-rule). + +### Create a notification endpoint + +InfluxData supports different endpoints: Slack, PagerDuty, and HTTP. Slack is free for all users, while PagerDuty and HTTP are exclusive to the Usage-Based Plan. + +#### Send a notification to Slack + +1. Create a [Slack webhook](https://api.slack.com/messaging/webhooks). +2. Go to **Alerts > Alerts** in the left navigation menu and then click **{{< caps >}}Notification Endpoints{{< /caps >}}**. + + {{< nav-icon "alerts" >}} + +3. Click **{{< caps >}}{{< icon "plus" >}} Create{{< /caps >}}**, and enter a name and description for your Slack endpoint. +4. Enter your Slack webhook under **Incoming Webhook URL** and click **{{< caps >}}Create Notification Endpoint{{< /caps >}}**. + +#### Send a notification to PagerDuty or HTTP + +Send a notification to PagerDuty or HTTP endpoints (other webhooks) by [upgrading your InfluxDB Cloud account](/influxdb/cloud/account-management/billing/#upgrade-to-usage-based-plan). + +### Create a notification rule + +[Create a notification rule](/influxdb/cloud/monitor-alert/notification-rules/create/) to set rules for when to send a deadman alert message to your notification endpoint. + +1. Go to **Alerts > Alerts** in the left navigation menu and then click **{{< caps >}}Notification Rules{{< /caps >}}**. + + {{< nav-icon "alerts" >}} + +2. Click **{{< caps >}}{{< icon "plus" >}} Create{{< /caps >}}**, and then provide + the required information. +3. Click **{{< caps >}}Create Notification Rule{{< /caps >}}**. diff --git a/content/influxdb/v2.5/monitor-alert/templates/networks/_index.md b/content/influxdb/v2.5/monitor-alert/templates/networks/_index.md new file mode 100644 index 000000000..c43030268 --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/networks/_index.md @@ -0,0 +1,14 @@ +--- +title: Monitor networks +description: > + Use one of our community templates to quickly set up InfluxDB (with a bucket and dashboard) to collect, analyze, and monitor your networks. +menu: + influxdb_2_5: + parent: Monitor with templates +weight: 104 +influxdb/v2.5/tags: [monitor, templates, networks, networking] +--- + +Use one of our community templates to quickly set up InfluxDB (with a bucket and dashboard) to collect, analyze, and monitor your networks. + +{{< children >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/monitor-alert/templates/networks/haproxy.md b/content/influxdb/v2.5/monitor-alert/templates/networks/haproxy.md new file mode 100644 index 000000000..89aeaacfd --- /dev/null +++ b/content/influxdb/v2.5/monitor-alert/templates/networks/haproxy.md @@ -0,0 +1,49 @@ +--- +title: Monitor HAProxy +description: > + Use the [HAProxy for InfluxDB v2 template](https://github.com/influxdata/community-templates/tree/master/haproxy) to monitor your HAProxy instance. +menu: + influxdb_2_5: + parent: Monitor networks + name: HAProxy +weight: 201 +--- + +Use the [HAProxy for InfluxDB v2 template](https://github.com/influxdata/community-templates/tree/master/haproxy) to monitor your HAProxy instances. First, [apply the template](#apply-the-template), and then [view incoming data](#view-incoming-data). 
+This template uses the [HAProxy input plugin](/{{< latest "telegraf" >}}/plugins/#haproxy) to collect metrics from your HAProxy instances and display these metrics in a dashboard. + +The HAProxy for InfluxDB v2 template includes the following: + +- one [dashboard](/influxdb/v2.5/reference/glossary/#dashboard): **HAProxy** +- one [bucket](/influxdb/v2.5/reference/glossary/#bucket): `haproxy` +- label: `haproxy` +- one [Telegraf configuration](/influxdb/v2.5/telegraf-configs/): HAProxy input plugin, InfluxDB v2 output plugin +- one variable: `bucket` + +## Apply the template + +1. Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) to run the following command: + + ```sh + influx apply -f https://raw.githubusercontent.com/influxdata/community-templates/master/haproxy/haproxy.yml + ``` + For more information, see [influx apply](/influxdb/v2.5/reference/cli/influx/apply/). + + > **Note:** Ensure your `influx` CLI is configured with your account credentials and that configuration is active. For more information, see [influx config](/influxdb/v2.5/reference/cli/influx/config/). + +2. [Install Telegraf](/{{< latest "telegraf" >}}/introduction/installation/) on a server with network access to both the HAProxy instances and [InfluxDB v2 API](/influxdb/v2.5/reference/api/). +3. In your [Telegraf configuration file (`telegraf.conf`)](/influxdb/v2.5/telegraf-configs/), do the following: + - Set the following environment variables: + - INFLUX_TOKEN: Token must have permissions to read Telegraf configurations and write data to the `haproxy` bucket. See how to [view tokens](/influxdb/v2.5/security/tokens/view-tokens/). + - INFLUX_ORG: Name of your organization. See how to [view your organization](/influxdb/v2.5/organizations/view-orgs/). + - INFLUX_HOST: Your InfluxDB host URL, for example, localhost, a remote instance, or InfluxDB Cloud. + +4. [Start Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#start-telegraf). + +## View incoming data + +1. In the InfluxDB user interface (UI), select **Dashboards** in the left navigation. + + {{< nav-icon "dashboards" >}} + +2. Open the **HAProxy** dashboard to start monitoring. diff --git a/content/influxdb/v2.5/notebooks/_index.md b/content/influxdb/v2.5/notebooks/_index.md new file mode 100644 index 000000000..716c44e5d --- /dev/null +++ b/content/influxdb/v2.5/notebooks/_index.md @@ -0,0 +1,17 @@ +--- +title: Notebooks +seotitle: Build notebooks in InfluxDB Cloud +description: > + Use notebooks to build and annotate processes and data flows for time series data. +menu: + influxdb_2_5: + name: Notebooks +weight: 6 +influxdb/v2.5/tags: [notebooks] +--- + +Notebooks are a way to build and annotate processes and data flows for time series data. Notebooks include cells and controls to transform the data in your buckets, among countless other possibilities. + +To learn how to use notebooks, check out the following articles: + +{{< children >}} diff --git a/content/influxdb/v2.5/notebooks/clean-data.md b/content/influxdb/v2.5/notebooks/clean-data.md new file mode 100644 index 000000000..9b9c8be0e --- /dev/null +++ b/content/influxdb/v2.5/notebooks/clean-data.md @@ -0,0 +1,163 @@ +--- +title: Normalize data with notebooks +description: > + Learn how to create a notebook that normalizes or cleans data to make it + easier to work with. +weight: 105 +influxdb/v2.5/tags: [notebooks] +menu: + influxdb_2_5: + name: Normalize data + parent: Notebooks +--- + +Learn how to create a notebook that normalizes data. 
+Data normalization is the process of modifying or cleaning data to make it easier to +work with. Examples include adjusting numeric values to a uniform scale and modifying strings. + +Walk through the following example to create a notebook that queries +[NOAA NDBC sample data](/influxdb/v2.0/reference/sample-data/#noaa-ndbc-data), +normalizes degree-based wind directions to cardinal directions, and then writes +the normalized data to a bucket. + +{{< cloud-only >}} +{{% cloud %}} +**Note**: Using sample data counts towards your total InfluxDB Cloud usage. +{{% /cloud %}} +{{< /cloud-only >}} + +1. [Create a new notebook](/influxdb/v2.5/notebooks/create-notebook/). +2. In the **Build a Query** cell: + + 1. In the **FROM** column under **{{% caps %}}Sample{{% /caps %}}**, + select **NOAA National Buoy Data**. + 2. In the next **FILTER** column, select **_measurement** from the drop-down list + and select the **ndbc** measurement in the list of measurements. + 3. In the next **FILTER** column, select **_field** from the drop-down list, + and select the **wind\_dir\_degt** field from the list of fields. + +3. Click {{% icon "notebook-add-cell" %}} after your **Build a Query** cell to + add a new cell and select **{{% caps %}}Flux Script{{% /caps %}}**. + +4. In the Flux script cell: + + 1. Define a custom function (`cardinalDir()`) that converts a numeric degree + value to a cardinal direction (N, NNE, NE, etc.). + 2. Use `__PREVIOUS_RESULT__` to load the output of the previous notebook + cell into the Flux script. + 3. Use [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to iterate + over each input row, update the field key to `wind_dir_cardinal`, and + normalize the `_value` column to a cardinal direction using the custom + `cardinalDir()` function. + 4. {{% cloud-only %}} + + Use [`to()`](/{{< latest "flux">}}/stdlib/influxdata/influxdb/to/) + to write the normalized data back to InfluxDB. + Specify an existing bucket to write to or + [create a new bucket](/influxdb/v2.5/organizations/buckets/create-bucket/). 
+ + {{% /cloud-only %}} + + {{% oss-only %}} + + ```js + import "array" + + cardinalDir = (d) => { + _cardinal = if d >= 348.75 or d < 11.25 then "N" + else if d >= 11.25 and d < 33.75 then "NNE" + else if d >= 33.75 and d < 56.25 then "NE" + else if d >= 56.25 and d < 78.75 then "ENE" + else if d >= 78.75 and d < 101.25 then "E" + else if d >= 101.25 and d < 123.75 then "ESE" + else if d >= 123.75 and d < 146.25 then "SE" + else if d >= 146.25 and d < 168.75 then "SSE" + else if d >= 168.75 and d < 191.25 then "S" + else if d >= 191.25 and d < 213.75 then "SSW" + else if d >= 213.75 and d < 236.25 then "SW" + else if d >= 236.25 and d < 258.75 then "WSW" + else if d >= 258.75 and d < 281.25 then "W" + else if d >= 281.25 and d < 303.75 then "WNW" + else if d >= 303.75 and d < 326.25 then "NW" + else if d >= 326.25 and d < 348.75 then "NNW" + else "" + + return _cardinal + } + + __PREVIOUS_RESULT__ + |> map(fn: (r) => ({r with + _field: "wind_dir_cardinal", + _value: cardinalDir(d: r._value), + })) + ``` + {{% /oss-only %}} + + {{% cloud-only %}} + + ```js + import "array" + + cardinalDir = (d) => { + _cardinal = if d >= 348.75 or d < 11.25 then "N" + else if d >= 11.25 and d < 33.75 then "NNE" + else if d >= 33.75 and d < 56.25 then "NE" + else if d >= 56.25 and d < 78.75 then "ENE" + else if d >= 78.75 and d < 101.25 then "E" + else if d >= 101.25 and d < 123.75 then "ESE" + else if d >= 123.75 and d < 146.25 then "SE" + else if d >= 146.25 and d < 168.75 then "SSE" + else if d >= 168.75 and d < 191.25 then "S" + else if d >= 191.25 and d < 213.75 then "SSW" + else if d >= 213.75 and d < 236.25 then "SW" + else if d >= 236.25 and d < 258.75 then "WSW" + else if d >= 258.75 and d < 281.25 then "W" + else if d >= 281.25 and d < 303.75 then "WNW" + else if d >= 303.75 and d < 326.25 then "NW" + else if d >= 326.25 and d < 348.75 then "NNW" + else "" + + return _cardinal + } + + __PREVIOUS_RESULT__ + |> map(fn: (r) => ({r with + _field: "wind_dir_cardinal", + _value: cardinalDir(d: r._value), + })) + |> to(bucket: "example-bucket") + ``` + {{% /cloud-only %}} + +5. {{% oss-only %}} + + Click {{% icon "notebook-add-cell" %}} after your **Flux Script** cell to + add a new cell and select **{{% caps %}}Output to Bucket{{% /caps %}}**. + Select a bucket from the **{{% icon "bucket" %}} Choose a bucket** + drop-down list. + + {{% /oss-only %}} + +6. _(Optional)_ Click {{% icon "notebook-add-cell" %}} and select **Note** to + add a cell containing notes about what this notebook does. For example, the + cell might say, "This notebook converts decimal degree wind direction values + to cardinal directions." +7. {{% oss-only %}} + + Click **Preview** in the upper left to verify that your notebook runs and previews the output. + + {{% /oss-only %}} +8. Click **Run** to run the notebook and write the normalized data to your bucket. + +## Continuously run a notebook +To continuously run your notebook, export the notebook as a task: + +1. Click {{% icon "notebook-add-cell" %}} to add a new cell and then select + **{{% caps %}}Task{{% /caps %}}**. +2. Provide the following: + + - **Every**: Interval that the task should run at. + - **Offset**: _(Optional)_ Time to wait after the defined interval to execute the task. + This allows the task to capture late-arriving data. + +3. Click **{{% icon "export" %}} Export as Task**. 
diff --git a/content/influxdb/v2.5/notebooks/create-notebook.md b/content/influxdb/v2.5/notebooks/create-notebook.md new file mode 100644 index 000000000..161549d10 --- /dev/null +++ b/content/influxdb/v2.5/notebooks/create-notebook.md @@ -0,0 +1,180 @@ +--- +title: Create a notebook +description: > + Create a notebook to explore, visualize, and process your data. +weight: 102 +influxdb/v2.5/tags: [notebooks] +menu: + influxdb_2_5: + name: Create a notebook + parent: Notebooks +--- + +Create a notebook to explore, visualize, and process your data. +Learn how to add and configure cells to customize your notebook. +To learn the benefits and concepts of notebooks, see [Overview of Notebooks](/influxdb/v2.5/notebooks/overview/). + +- [Create a notebook from a preset](#create-a-notebook-from-a-preset) +- [Use data source cells](#use-data-source-cells) +- [Use visualization cells](#use-visualization-cells) +- [Add a data source cell](#add-a-data-source-cell) +- [Add a validation cell](#add-a-validation-cell) +- [Add a visualization cell](#add-a-visualization-cell) + +## Create a notebook from a preset + +To create a new notebook, do the following: + +1. In the navigation menu on the left, click **Notebooks**. + + {{< nav-icon "notebooks" >}} +2. In the **Notebooks** page, select one of the following options under **Create a Notebook**: + - **New Notebook**: includes a [query builder cell](#add-a-data-source-cell), a [validation cell](#add-a-validation-cell), and a [visualization cell](#add-a-visualization-cell). + - **Set an Alert**: includes a [query builder cell](#add-a-data-source-cell), a [validation cell](#add-a-validation-cell), a [visualization cell](#add-a-visualization-cell), and an [alert builder cell](#add-an-action-cell). + - **Schedule a Task**: includes a [Flux Script editor cell](#add-a-data-source-cell), a [validation cell](#add-a-validation-cell), and a [task schedule cell](#add-an-action-cell). + - **Write a Flux Script**: includes a [Flux script editor cell](#add-a-data-source-cell), and a [validation cell](#add-a-validation-cell). + +3. Enter a name for your notebook in the **Untitled Notebook** field. +4. Do the following at the top of the page: + - Select your local time zone or UTC. + - Choose a time [range](/{{% latest "flux" %}}/stdlib/universe/range/) for your data. +5. Your notebook should have a **Data Source** cell as the first cell. **Data Source** cells provide data to subsequent cells. The presets (listed in step 2) include either a **Query Builder** or a **Flux Script** as the first cell. +6. To define your data source query, do one of the following: + - If your notebook uses a **Query Builder** cell, select your bucket and any additional filters for your query. + - If your notebook uses a **Flux Script** cell, enter or paste a [Flux script](/influxdb/v2.5/query-data/flux/). +7. {{% oss-only %}} + + Select and click **Preview** (or press **CTRL + Enter**) under the notebook title. + InfluxDB displays query results in **Validate the Data** and **Visualize the Result** *without writing data or + running actions*. + + {{% /oss-only %}} +8. (Optional) Change your visualization settings with the drop-down menu and the {{< icon "gear" >}} **Configure** button at the top of the **Visualize the Result** cell. +9. (Optional) Toggle the **Presentation** switch to display visualization cells and hide all other cells. +10. 
(Optional) Configure notebook actions {{% oss-only %}}(**Alert**, **Task**, or **Output to Bucket**){{% /oss-only %}}{{% cloud-only %}}(**Alert** or **Task**){{% /cloud-only %}}. +11. (Optional) To run your notebook actions, select and click **Run** under the notebook title. +12. (Optional) To add a new cell, follow the steps for one of the cell types: + + - [Add a data source cell](#add-a-data-source-cell) + - [Add a validation cell](#add-a-validation-cell) + - [Add a visualization cell](#add-a-visualization-cell) + - [Add an action cell](#add-an-action-cell) +13. (Optional) [Convert a query builder cell into raw Flux script](#convert-a-query-builder-to-flux) to view and edit the code. + +## Use data source cells + +### Convert a Query Builder to Flux +To edit the raw Flux script of a **Query Builder** cell, convert the cell to Flux. + +{{% warn %}} +You can't convert a **Flux Script** editor cell to a **Query Builder** cell. +Once you convert a **Query Builder** cell to a **Flux Script** editor cell, you can't convert it back. +{{% /warn %}} + +1. Click the {{% icon "more" %}} icon in the **Query Builder** cell you want to edit as Flux, and then select **Convert to |> Flux**. +You won't be able to undo this step. + + A **Flux Script** editor cell containing the raw Flux script replaces the **Query Builder** cell. + +2. View and edit the Flux script as needed. + +## Use visualization cells + +- To change your [visualization type](/influxdb/v2.5/visualize-data/visualization-types/), select a new type from the drop-down list at the top of the cell. +- (For histogram only) To specify values, click **Select**. +- To configure the visualization, click **Configure**. +- To download results as an annotated CSV file, click the **CSV** button. +- To export to the dashboard, click **Export to Dashboard**. + +## Add a data source cell + +Add a [data source cell](/influxdb/v2.5/notebooks/overview/#data-source) to pull information into your notebook. + +To add a data source cell, do the following: +1. Click {{< icon "notebook-add-cell" >}}. +2. Select **{{< caps >}}Flux Script{{< /caps >}}** or **{{< caps >}}Query Builder{{< /caps >}}** as your input, and then select or enter the bucket to pull data from. +3. Select filters to narrow your data. +4. Select {{% oss-only %}}**Preview** (**CTRL + Enter**) or {{% /oss-only %}}**Run** in the upper left drop-down list. + +## Add a validation cell + +A validation cell uses the **Table** [visualization type](/influxdb/v2.5/visualize-data/visualization-types/) to display query results from a data source cell. + +To add a **Table** visualization cell, do the following: + +1. Click {{< icon "notebook-add-cell" >}}. +2. Under **Visualization**, click **{{< caps >}}Table{{< /caps >}}**. + +## Add a visualization cell + +Add a visualization cell to render query results as a [Visualization type](/influxdb/v2.5/visualize-data/visualization-types/). + +To add a visualization cell, do the following: + +1. Click {{< icon "notebook-add-cell" >}}. +2. Under **Visualization**, select one of the following visualization cell types: + + - **{{< caps >}}Table{{< /caps >}}**: Display data in tabular format. + - **{{< caps >}}Graph{{< /caps >}}**: Visualize data using InfluxDB visualizations. + - **{{< caps >}}Note{{< /caps >}}**: Use Markdown to add notes or other information to your notebook. + +To modify a visualization cell, see [use visualization cells](#use-visualization-cells). 
+For detail on available visualization types and how to use them, see [Visualization types](/influxdb/v2.5/visualize-data/visualization-types/). + +## Add an action cell + +Add an [action cell](/influxdb/v2.5/notebooks/overview/#action) to create an [alert](/influxdb/v2.5/monitor-alert/) +{{% cloud-only %}}or{{% /cloud-only %}}{{% oss-only %}},{{% /oss-only %}} process data with a [task](/influxdb/v2.5/process-data/manage-tasks/) +{{% oss-only %}}, or output data to a bucket{{% /oss-only %}}. + +{{% oss-only %}} + +{{% warn %}} +If your cell contains a custom script that uses any output function to write data to InfluxDB (for example: the `to()` function) or sends data to a third-party service, clicking Preview will write data. +{{% /warn %}} + +{{% /oss-only %}} + +- [Add an Alert cell](#add-an-alert-cell) +- {{% oss-only %}}[Add an Output to Bucket cell](#add-an-output-to-bucket-cell){{% /oss-only %}} +- [Add a Task cell](#add-a-task-cell) + +### Add an Alert cell + +To add an [alert](/influxdb/v2.5/monitor-alert/) to your notebook, do the following: + +1. Enter a time range to automatically check the data and enter your query offset. +2. Customize the conditions to send an alert. +3. Select an endpoint to receive an alert: + - Slack and a Slack Channel + - HTTP post + - PagerDuty +4. (Optional) Personalize your message. By default, the message is: + ``` + ${strings.title(v: r._type)} for ${r._source_measurement} triggered at ${time(v: r._source_timestamp)}! + ``` +5. Click **{{< caps >}}Test Alert{{< /caps >}}** to send a test message to your configured **Endpoint**. The test will not schedule the new alert. +6. Click **{{< icon "export" >}} {{< caps >}}Export Alert Task{{< /caps >}}** to create your alert. + +{{% oss-only %}} + +### Add an Output to Bucket cell + +To write **Data Source** results to a bucket, do the following: + +1. Click {{% icon "notebook-add-cell" %}}. +2. Click **{{< caps >}}Output to Bucket{{< /caps >}}**. +3. In the **{{< icon "bucket" >}} Choose a bucket** drop-down list, select or create a bucket. +4. Click **Preview** to view the query result in validation cells. +5. Select and click **Run** in the upper left to write the query result to the bucket. + +{{% /oss-only %}} + +### Add a Task cell + +To add a [task](/influxdb/v2.5/process-data/manage-tasks/) to your notebook, do the following: + +1. Click {{% icon "notebook-add-cell" %}}. +2. Click **{{< caps >}}Task{{< /caps >}}**. +3. Enter a time and an offset to schedule the task. +4. Click **{{< icon "task" >}} {{< caps >}}Export as Task{{< /caps >}}** to save. diff --git a/content/influxdb/v2.5/notebooks/downsample.md b/content/influxdb/v2.5/notebooks/downsample.md new file mode 100644 index 000000000..ec01aab85 --- /dev/null +++ b/content/influxdb/v2.5/notebooks/downsample.md @@ -0,0 +1,111 @@ +--- +title: Downsample data with notebooks +description: > + Create a notebook to downsample data. Downsampling aggregates or summarizes data + within specified time intervals, reducing the overall disk usage as data + collects over time. +weight: 104 +influxdb/v2.5/tags: [notebooks] +menu: + influxdb_2_5: + name: Downsample data + identifier: notebooks-downsample + parent: Notebooks +--- + +Create a notebook to downsample data. Downsampling aggregates or summarizes data +within specified time intervals, reducing the overall disk usage as data +collects over time. 
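To illustrate the downsampling pattern outside of a notebook, the following is a minimal Flux sketch (not part of any template): it windows raw data into ten-minute averages and writes the result to a destination bucket. The bucket names and the measurement filter are placeholders; substitute your own.

```js
// Downsampling sketch -- bucket and measurement names are placeholders
from(bucket: "example-raw-bucket")
    |> range(start: -1h)
    |> filter(fn: (r) => r._measurement == "example-measurement")
    |> aggregateWindow(every: 10m, fn: mean)
    |> to(bucket: "example-downsampled-bucket")
```

The notebook walkthrough below builds the same pipeline with a **Build a Query** cell and a **Flux Script** cell instead of a standalone script.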
+ +The following example creates a notebook that queries **Coinbase bitcoin price +sample data** from the last hour, downsamples the data into ten-minute summaries, +and then writes the downsampled data to an InfluxDB bucket. + +1. If you do not have an existing bucket to write the downsampled data to, + [create a new bucket](/influxdb/v2.5/organizations/buckets/create-bucket/). +2. [Create a new notebook](/influxdb/v2.5/notebooks/create-notebook/). +3. Select **Past 1h** from the time range drop-down list at the top of your notebook. +4. In the **Build a Query** cell: + + 1. In the **FROM** column under **{{% caps %}}Sample{{% /caps %}}**, + select **Coinbase bitcoin price**. + 2. In the next **FILTER** column, select **_measurement** from the drop-down list + and select the **coindesk** measurement in the list of measurements. + 3. In the next **FILTER** column, select **_field** from the drop-down list, + and select the **price** field from the list of fields. + 4. In the next **FILTER** column, select **code** from the drop-down list, + and select a currency code. + +5. Click {{% icon "notebook-add-cell" %}} after your **Build a Query** cell to + add a new cell and select **{{% caps %}}Flux Script{{% /caps %}}**. + +6. In the Flux script cell: + + 1. Use `__PREVIOUS_RESULT__` to load the output of the previous notebook + cell into the Flux script. + 2. Use [`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) + to window data into ten-minute intervals and return the average of each interval. + Specify the following parameters: + + - **every**: Window interval _(should be less than or equal to the duration of the queried time range)_. + For this example, use `10m`. + - **fn**: [Aggregate](/{{< latest "flux" >}}/function-types/#aggregates) + or [selector](/{{< latest "flux" >}}/function-types/#selectors) function + to apply to each window. + For this example, use `mean`. + + 3. {{% cloud-only %}} + + Use [`to()`](/{{< latest "flux">}}/stdlib/influxdata/influxdb/to/) + to write the downsampled data back to an InfluxDB bucket. + + {{% /cloud-only %}} + + {{% oss-only %}} + + ```js + __PREVIOUS_RESULT__ + |> aggregateWindow(every: 10m, fn: mean) + ``` + {{% /oss-only %}} + + {{% cloud-only %}} + + ```js + __PREVIOUS_RESULT__ + |> aggregateWindow(every: 10m, fn: mean) + |> to(bucket: "example-bucket") + ``` + {{% /cloud-only %}} + +7. {{% oss-only %}} + + Click {{% icon "notebook-add-cell" %}} after your **Flux Script** cell to + add a new cell and select **{{% caps %}}Output to Bucket{{% /caps %}}**. + Select a bucket from the **{{% icon "bucket" %}} Choose a bucket** + drop-down list. + + {{% /oss-only %}} + +8. _(Optional)_ Click {{% icon "notebook-add-cell" %}} and select **Note** to + add a note to describe your notebook, for example, + "Downsample Coinbase bitcoin prices into ten-minute averages." +9. {{% oss-only %}} + + Click **Preview** in the upper left to verify that your notebook runs and displays the output. + + {{% /oss-only %}} +10. Click **Run** to run the notebook and write the downsampled data to your bucket. + +## Continuously run a notebook +To continuously run your notebook, export the notebook as a task: + +1. Click {{% icon "notebook-add-cell" %}} to add a new cell, and then select + **{{% caps %}}Task{{% /caps %}}**. +2. Provide the following: + + - **Every**: Interval that the task should run at. + - **Offset**: _(Optional)_ Time to wait after the defined interval to execute the task. + This allows the task to capture late-arriving data. + +3. 
Click **{{% icon "export" %}} Export as Task**. diff --git a/content/influxdb/v2.5/notebooks/manage-notebooks.md b/content/influxdb/v2.5/notebooks/manage-notebooks.md new file mode 100644 index 000000000..519b6816f --- /dev/null +++ b/content/influxdb/v2.5/notebooks/manage-notebooks.md @@ -0,0 +1,56 @@ +--- +title: Manage notebooks +description: View, update, and delete notebooks. +weight: 103 +influxdb/v2.5/tags: [notebooks] +menu: + influxdb_2_5: + name: Manage notebooks + parent: Notebooks +--- + +Manage your notebooks in the UI: + +- [View or update a notebook](#view-or-update-notebooks) +- {{% cloud-only %}}[Share a notebook](#share-a-notebook){{% /cloud-only %}} +- {{% cloud-only %}}[Unshare a notebook](#unshare-a-notebook){{% /cloud-only %}} +- [Delete a notebook](#delete-a-notebook) + +## View or update notebooks + +1. In the navigation menu on the left, click **Notebooks**. + + {{< nav-icon "notebooks" >}} + + A list of notebooks appears. +2. Click a notebook to open it. +3. To update, edit the notebook's cells and content. Changes are saved automatically. + +{{% cloud-only %}} + +## Share a notebook + +1. In the navigation menu on the left, click **Notebooks**. + +{{< nav-icon "notebooks" >}} + +2. Click the notebook to open it, and then click the **{{< icon "share" >}}** icon. +3. Select an API token with read-access to all resources in the notebook, + and then click the **{{< icon "check" >}}** icon. +4. Share the generated notebook URL as needed. + +## Unshare a notebook + +To stop sharing a notebook, select **{{< icon "trash" >}}** next to the shared notebook URL. + +{{% /cloud-only %}} + +## Delete a notebook + +1. In the navigation menu on the left, click **Notebooks**. + + {{< nav-icon "notebooks" >}} + +2. Hover over a notebook in the list that appears. +3. Click **Delete Notebook**. +4. Click **Confirm**. diff --git a/content/influxdb/v2.5/notebooks/overview.md b/content/influxdb/v2.5/notebooks/overview.md new file mode 100644 index 000000000..3361e2d1f --- /dev/null +++ b/content/influxdb/v2.5/notebooks/overview.md @@ -0,0 +1,97 @@ +--- +title: Overview of notebooks +description: > + Learn about the building blocks of a notebook. +weight: 101 +influxdb/v2.5/tags: [notebooks] +menu: + influxdb_2_5: + name: Overview of notebooks + parent: Notebooks +--- + +Learn how notebooks can help to streamline and simplify your day-to-day business processes. + +See an overview of [notebook concepts](/influxdb/v2.5/notebooks/overview/#notebook-concepts), [notebook controls](/influxdb/v2.5/notebooks/overview/#notebook-controls), and [notebook cell types](/influxdb/v2.5/notebooks/overview/#notebook-cell-types), also known as the basic building blocks of a notebook. + +## Notebook concepts + +You can think of an InfluxDB notebook as a collection of sequential data processing steps. Each step is represented by a "cell" that performs an action such as querying, visualizing, processing, or writing data to your buckets. Notebooks help you do the following: + +- Create snippets of live code, equations, visualizations, and explanatory notes. +- Create alerts or scheduled tasks. +- Downsample and normalize data. +- Build runbooks to share with your teams. +- Output data to buckets. + +## Notebook controls + +The following options appear at the top of each notebook. + +{{% oss-only %}} + +### Preview/Run mode + +- Select **Preview** (or press **Control+Enter**) to display results of each cell without writing data. This helps you verify that cells return expected results before writing data. 
+ +{{% /oss-only %}} + +{{% cloud-only %}} + +### Run + +Select {{< caps >}}Run{{< /caps >}} (or press **Control+Enter**) to display results of each cell and write data to the selected bucket. + +{{% /cloud-only %}} + +### Save Notebook (appears before first save) + +Select {{< caps >}}Save Notebook{{< /caps >}} to save all notebook cells. Once you've saved the notebook, this button disappears and the notebook automatically saves as subsequent changes are made. + +{{% note %}} +Saving the notebook does not save cell results. When you open a saved notebook, click {{< caps >}}**Run**{{< /caps >}} to update cell results. +{{% /note %}} + +### Local or UTC timezone + +Click the timezone drop-down list to select a timezone to use for the notebook. Select either the local time (default) or UTC. + +### Time range + +Select from the options in the dropdown list or select **Custom Time Range** to enter a custom time range with precision up to nanoseconds, and then click **{{< caps >}}Apply Time Range{{< /caps >}}**. + +{{% cloud-only %}} + +### Share notebook + +To generate a URL for the notebook, click the **{{< icon "share" >}}** icon. +For more detail, see how to [share a notebook](/influxdb/cloud/notebooks/manage-notebooks/#share-a-notebook). + +{{% /cloud-only %}} + +## Notebook cell types + +The following cell types are available for your notebook: +- [Data source](#data-source) +- [Visualization](#visualization) +- [Action](#action) + +### Data source + +At least one data source (input) cell is required in a notebook for other cells to run. + +- **{{< caps >}}Query Builder{{< /caps >}}**: Build a query with the Flux query builder. +- **{{< caps >}}Flux Script{{< /caps >}}**: Enter a raw Flux script. + + Data source cells work like the **Query Builder** or **Script Editor** in Data Explorer. For more information, see how to [query data with Flux and the Data Explorer](/influxdb/v2.5/query-data/execute-queries/data-explorer/#query-data-with-flux-and-the-data-explorer). + +### Visualization + +- **{{< caps >}}Table{{< /caps >}}**: View your data in a table. +- **{{< caps >}}Graph{{< /caps >}}**: View your data in a graph. +- **{{< caps >}}Note{{< /caps >}}**: Create explanatory notes or other information for yourself or your team members. + +### Action + +- **{{< caps >}}Alert{{< /caps >}}**: Set up alerts. See how to [monitor data and send alerts](/influxdb/v2.5/monitor-alert/). +- **{{< caps >}}Tasks{{< /caps >}}**: Use the notebook to set up and export a task. See how to [manage tasks in InfluxDB](/influxdb/v2.5/process-data/manage-tasks/). diff --git a/content/influxdb/v2.5/notebooks/troubleshoot-notebooks.md b/content/influxdb/v2.5/notebooks/troubleshoot-notebooks.md new file mode 100644 index 000000000..0f73750b9 --- /dev/null +++ b/content/influxdb/v2.5/notebooks/troubleshoot-notebooks.md @@ -0,0 +1,14 @@ +--- +title: Troubleshoot notebooks +description: Common issues with the notebooks feature. +weight: 106 +influxdb/v2.5/tags: [notebooks] +menu: + influxdb_2_5: + name: Troubleshoot notebooks + parent: Notebooks +--- + +### No measurements appear in my bucket even though there's data in it. + +Try changing the time range. You might have measurements prior to the time range you selected. For example, if the selected time range is `Past 1h` and the last write happened 16 hours ago, you'd need to change the time range to `Past 24h` (or more) to see your data. 
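For example, a quick way to check whether older data exists is to query a wider time range directly in a **Flux Script** cell. The bucket name below is a placeholder; substitute your own.

```js
// Look back 24 hours instead of the selected notebook time range
// and return up to 10 rows per series as a spot check
from(bucket: "example-bucket")
    |> range(start: -24h)
    |> limit(n: 10)
```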
diff --git a/content/influxdb/v2.5/organizations/_index.md b/content/influxdb/v2.5/organizations/_index.md new file mode 100644 index 000000000..de86846e6 --- /dev/null +++ b/content/influxdb/v2.5/organizations/_index.md @@ -0,0 +1,17 @@ +--- +title: Manage organizations +seotitle: Manage organizations in InfluxDB +description: Manage organizations in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: Manage organizations +weight: 10 +influxdb/v2.5/tags: [organizations] +--- + +An **organization** is a workspace for a group of users. +All dashboards, tasks, buckets, members, etc., belong to an organization. + +The following articles provide information about managing organizations: + +{{< children >}} diff --git a/content/influxdb/v2.5/organizations/buckets/_index.md b/content/influxdb/v2.5/organizations/buckets/_index.md new file mode 100644 index 000000000..891108f53 --- /dev/null +++ b/content/influxdb/v2.5/organizations/buckets/_index.md @@ -0,0 +1,20 @@ +--- +title: Manage buckets +seotitle: Manage buckets in InfluxDB +description: Manage buckets in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: Manage buckets + parent: Manage organizations +weight: 105 +influxdb/v2.5/tags: [buckets] +--- + +A **bucket** is a named location where time series data is stored. +All buckets have a **retention period**, a duration of time that each data point persists. +InfluxDB drops all points with timestamps older than the bucket's retention period. +A bucket belongs to an organization. + +The following articles provide information about managing buckets: + +{{< children >}} diff --git a/content/influxdb/v2.5/organizations/buckets/create-bucket.md b/content/influxdb/v2.5/organizations/buckets/create-bucket.md new file mode 100644 index 000000000..ab7280571 --- /dev/null +++ b/content/influxdb/v2.5/organizations/buckets/create-bucket.md @@ -0,0 +1,112 @@ +--- +title: Create a bucket +seotitle: Create a bucket in InfluxDB +description: Create buckets to store time series data in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: Create a bucket + parent: Manage buckets +weight: 201 +--- + +Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI) +to create a bucket. + +{{% note %}} +#### Bucket limits +A single InfluxDB {{< current-version >}} OSS instance supports approximately 20 buckets actively being +written to or queried across all organizations depending on the use case. +Any more than that can adversely affect performance. +{{% /note %}} + +## Create a bucket in the InfluxDB UI + +There are two places you can create a bucket in the UI. + +### Create a bucket from the Load Data menu + +1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**. + + {{< nav-icon "data" >}} + +2. Click **{{< icon "plus" >}} Create Bucket** in the upper right. +3. Enter a **Name** for the bucket. +4. Select when to **Delete Data**: + - **Never** to retain data forever. + - **Older than** to choose a specific retention period. +5. Click **Create** to create the bucket. + +### Create a bucket in the Data Explorer + +1. In the navigation menu on the left, select **Explore** (**Data Explorer**). + + {{< nav-icon "data-explorer" >}} + +2. In the **From** panel in the Flux Builder, select `+ Create Bucket`. +3. Enter a **Name** for the bucket. +4. Select when to **Delete Data**: + - **Never** to retain data forever. + - **Older than** to choose a specific retention period. +5. 
Click **Create** to create the bucket. + +## Create a bucket using the influx CLI + +Use the [`influx bucket create` command](/influxdb/v2.5/reference/cli/influx/bucket/create) +to create a new bucket. A bucket requires the following: + +- bucket name +- organization name or ID +- retention period (duration to keep data) in one of the following units: + - nanoseconds (`ns`) + - microseconds (`us` or `µs`) + - milliseconds (`ms`) + - seconds (`s`) + - minutes (`m`) + - hours (`h`) + - days (`d`) + - weeks (`w`) + + {{% note %}} + The minimum retention period is **one hour**. + {{% /note %}} + +```sh +# Syntax +influx bucket create -n <bucket-name> -o <org-name> -r <retention-period> + +# Example +influx bucket create -n my-bucket -o my-org -r 72h +``` + +## Create a bucket using the InfluxDB API + +Use the InfluxDB API to create a bucket. + +{{% note %}} +#### Bucket limits +A single InfluxDB {{< current-version >}} OSS instance supports approximately 20 buckets actively being +written to or queried across all organizations depending on the use case. +Any more than that can adversely affect performance. +{{% /note %}} + +Create a bucket in InfluxDB using an HTTP request to the InfluxDB API `/buckets` endpoint. +Use the `POST` request method and include the following in your request: + +| Requirement | Include by | +|:----------- |:---------- | +| Organization | Use `orgID` in the JSON payload. | +| Bucket | Use `name` in the JSON payload. | +| Retention Rules | Use `retentionRules` in the JSON payload. | +| API token | Use the `Authorization: Token` header. | + +#### Example + +The URL depends on the version and location of your InfluxDB {{< current-version >}} +instance _(see [InfluxDB URLs](/influxdb/v2.5/reference/urls/))_. + +```sh +{{% get-shared-text "api/v2.0/buckets/oss/create.sh" %}} +``` + +_For information about **InfluxDB API options and response codes**, see +[InfluxDB API Buckets documentation](/influxdb/v2.5/api/#operation/PostBuckets)._ diff --git a/content/influxdb/v2.5/organizations/buckets/delete-bucket.md b/content/influxdb/v2.5/organizations/buckets/delete-bucket.md new file mode 100644 index 000000000..e1a7d9cbc --- /dev/null +++ b/content/influxdb/v2.5/organizations/buckets/delete-bucket.md @@ -0,0 +1,72 @@ +--- +title: Delete a bucket +seotitle: Delete a bucket from InfluxDB +description: Delete a bucket from InfluxDB using the InfluxDB UI or the influx CLI +menu: + influxdb_2_5: + name: Delete a bucket + parent: Manage buckets +weight: 203 +--- + +Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI) +to delete a bucket. + +## Delete a bucket in the InfluxDB UI + +{{% oss-only %}} + +1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**. + +{{< nav-icon "data" >}} + +2. Hover over the bucket you would like to delete. +3. Click the **{{< icon "delete" >}}** icon located far right of the bucket name. +4. Click **Delete** to delete the bucket. +{{% /oss-only %}} + +{{% cloud-only %}} + +1. In the navigation menu on the left, select **Load Data** > **Buckets**. + +{{< nav-icon "data" >}} + +2. Find the bucket that you would like to delete. +3. Click the **{{< icon "delete" >}}** icon located far right of the bucket name. +4. Click **{{< caps >}}Confirm{{< /caps >}}** to delete the bucket. + +{{% /cloud-only %}} + +## Delete a bucket using the influx CLI + +Use the [`influx bucket delete` command](/influxdb/v2.5/reference/cli/influx/bucket/delete) +to delete a bucket by name or ID. 
+ +### Delete a bucket by name +**To delete a bucket by name, you need:** + +- Bucket name +- Bucket's organization name or ID + + +```sh +# Syntax +influx bucket delete -n <bucket-name> -o <org-name> + +# Example +influx bucket delete -n my-bucket -o my-org +``` + +### Delete a bucket by ID +**To delete a bucket by ID, you need:** + +- Bucket ID _(provided in the output of `influx bucket list`)_ + + +```sh +# Syntax +influx bucket delete -i <bucket-id> + +# Example +influx bucket delete -i 034ad714fdd6f000 +``` diff --git a/content/influxdb/v2.5/organizations/buckets/update-bucket.md b/content/influxdb/v2.5/organizations/buckets/update-bucket.md new file mode 100644 index 000000000..55328fc6f --- /dev/null +++ b/content/influxdb/v2.5/organizations/buckets/update-bucket.md @@ -0,0 +1,88 @@ +--- +title: Update a bucket +seotitle: Update a bucket in InfluxDB +description: Update a bucket's name or retention period in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: Update a bucket + parent: Manage buckets +weight: 202 +--- + +Use the `influx` command line interface (CLI) or the InfluxDB user interface (UI) to update a bucket. + +Note that updating a bucket's name will affect any assets that reference the bucket by name, including the following: + + - Queries + - Dashboards + - Tasks + - Telegraf configurations + - Templates + +If you change a bucket name, be sure to update the bucket in the above places as well. + +## Update a bucket's name in the InfluxDB UI + +1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**. + + {{< nav-icon "data" >}} + +2. Click **Settings** under the bucket you want to rename. +3. Click **Rename**. +4. Review the information in the window that appears and click **I understand, let's rename my bucket**. +5. Update the bucket's name and click **Change Bucket Name**. + +## Update a bucket's retention period in the InfluxDB UI + +1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**. + + {{< nav-icon "data" >}} + +2. Click **Settings** next to the bucket you want to update. +3. In the window that appears, edit the bucket's retention period. +4. Click **Save Changes**. + +## Update a bucket using the influx CLI + +Use the [`influx bucket update` command](/influxdb/v2.5/reference/cli/influx/bucket/update) +to update a bucket. Updating a bucket requires the following: + +- The bucket ID _(provided in the output of `influx bucket list`)_ +- The name or ID of the organization the bucket belongs to. + +{{< cli/influx-creds-note >}} + +##### Update the name of a bucket + +```sh +# Syntax +influx bucket update -i <bucket-id> -n <new-bucket-name> + +# Example +influx bucket update -i 034ad714fdd6f000 -n my-new-bucket +``` + +##### Update a bucket's retention period + +Valid retention period duration units: + +- nanoseconds (`ns`) +- microseconds (`us` or `µs`) +- milliseconds (`ms`) +- seconds (`s`) +- minutes (`m`) +- hours (`h`) +- days (`d`) +- weeks (`w`) + +{{% note %}} +The minimum retention period is **one hour**. 
+{{% /note %}} + +```sh +# Syntax +influx bucket update -i <bucket-id> -r <retention-period> + +# Example +influx bucket update -i 034ad714fdd6f000 -r 1209600000000000ns +``` diff --git a/content/influxdb/v2.5/organizations/buckets/view-buckets.md b/content/influxdb/v2.5/organizations/buckets/view-buckets.md new file mode 100644 index 000000000..cf0461d0f --- /dev/null +++ b/content/influxdb/v2.5/organizations/buckets/view-buckets.md @@ -0,0 +1,34 @@ +--- +title: View buckets +seotitle: View buckets in InfluxDB +description: View a list of all the buckets for an organization in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: View buckets + parent: Manage buckets +weight: 202 +--- + +## View buckets in the InfluxDB UI + +1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**. + + {{< nav-icon "data" >}} + + A list of buckets with their retention periods and IDs appears. + +2. Click a bucket to open it in the **Data Explorer**. +3. Click the **bucket ID** to copy it to the clipboard. + +## View buckets using the influx CLI + +Use the [`influx bucket list` command](/influxdb/v2.5/reference/cli/influx/bucket/list) +to view buckets in an organization. + +```sh +influx bucket list +``` + +Other filtering options such as filtering by organization, name, or ID are available. +See the [`influx bucket list` documentation](/influxdb/v2.5/reference/cli/influx/bucket/list) +for information about other available flags. diff --git a/content/influxdb/v2.5/organizations/create-org.md b/content/influxdb/v2.5/organizations/create-org.md new file mode 100644 index 000000000..d78b751ac --- /dev/null +++ b/content/influxdb/v2.5/organizations/create-org.md @@ -0,0 +1,47 @@ +--- +title: Create an organization +seotitle: Create an organization in InfluxDB +description: Create an organization in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: Create an organization + parent: Manage organizations +weight: 101 +products: [oss] +--- + +Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI) +to create an organization. + +{{% note %}} +#### Organization and bucket limits +A single InfluxDB {{< current-version >}} OSS instance supports approximately 20 buckets actively being +written to or queried across all organizations depending on the use case. +Any more than that can adversely affect performance. +Because each organization is created with a bucket, we do not recommend more than +20 organizations in a single InfluxDB OSS instance. +{{% /note %}} + +## Create an organization in the InfluxDB UI + +1. In the navigation menu on the left, click the **Account dropdown**. + + {{< nav-icon "account" >}} + +2. Select **Create Organization**. +3. In the window that appears, enter an **Organization Name** and **Bucket Name** and click **Create**. + +## Create an organization using the influx CLI + +Use the [`influx org create` command](/influxdb/v2.5/reference/cli/influx/org/create) +to create a new organization. 
A new organization requires the following: + +- A name for the organization + +```sh +# Syntax +influx org create -n <org-name> + +# Example +influx org create -n my-org +``` diff --git a/content/influxdb/v2.5/organizations/delete-org.md b/content/influxdb/v2.5/organizations/delete-org.md new file mode 100644 index 000000000..9d8bb6276 --- /dev/null +++ b/content/influxdb/v2.5/organizations/delete-org.md @@ -0,0 +1,41 @@ +--- +title: Delete an organization +seotitle: Delete an organization from InfluxDB +description: Delete an existing organization from InfluxDB using the influx CLI. +menu: + influxdb_2_5: + name: Delete an organization + parent: Manage organizations +weight: 104 +products: [oss] +--- + +Use the `influx` command line interface (CLI) +to delete an organization. + + + +## Delete an organization using the influx CLI + +Use the [`influx org delete` command](/influxdb/v2.5/reference/cli/influx/org/delete) +to delete an organization. Deleting an organization requires the following: + +- The organization ID _(provided in the output of `influx org list`)_ + +```sh +# Syntax +influx org delete -i <org-id> + +# Example +influx org delete -i 034ad714fdd6f000 +``` diff --git a/content/influxdb/v2.5/organizations/members/_index.md b/content/influxdb/v2.5/organizations/members/_index.md new file mode 100644 index 000000000..146eaa5f7 --- /dev/null +++ b/content/influxdb/v2.5/organizations/members/_index.md @@ -0,0 +1,16 @@ +--- +title: Manage organization members +seotitle: Manage members of an organization in InfluxDB +description: Manage members of an organization in InfluxDB using the InfluxDB UI or CLI. +menu: + influxdb_2_5: + name: Manage members + parent: Manage organizations +weight: 106 +influxdb/v2.5/tags: [members] +--- + +A **member** is a user that belongs to an organization. +The following articles provide information about managing users: + +{{< children >}} diff --git a/content/influxdb/v2.5/organizations/members/add-member.md b/content/influxdb/v2.5/organizations/members/add-member.md new file mode 100644 index 000000000..afbb58d5c --- /dev/null +++ b/content/influxdb/v2.5/organizations/members/add-member.md @@ -0,0 +1,55 @@ +--- +title: Add a member +seotitle: Add a member to an organization in InfluxDB +description: > + Use the `influx` command line interface (CLI) to add a member to an organization + and optionally make that member an owner across all organizations. +menu: + influxdb_2_5: + name: Add a member + parent: Manage members +weight: 201 +--- + +Use the `influx` command line interface (CLI) to add a member to an organization +and optionally make that member an owner across all organizations. + +## Add a member to an organization using the influx CLI + +1. Get a list of users and their IDs by running the following: + + ```sh + influx user list + ``` + +2. To add a user as a member of an organization, use the `influx org members add` command. 
+ Provide the following: + + - Organization name + - User ID + - _(Optional)_ `--owner` flag to add the user as an owner + _(requires an [operator token](/influxdb/v2.5/security/tokens/#operator-token))_ + + {{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Add member](#) +[Add owner](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +influx org members add \ + -n <org-name> \ + -m <user-id> +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +influx org members add \ + -n <org-name> \ + -m <user-id> \ + --owner +``` +{{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +For more information, see the [`influx org members add` command](/influxdb/v2.5/reference/cli/influx/org/members/add). diff --git a/content/influxdb/v2.5/organizations/members/remove-member.md b/content/influxdb/v2.5/organizations/members/remove-member.md new file mode 100644 index 000000000..9de5d72a7 --- /dev/null +++ b/content/influxdb/v2.5/organizations/members/remove-member.md @@ -0,0 +1,44 @@ +--- +title: Remove a member +seotitle: Remove a member from an organization in InfluxDB +description: Remove a member from an organization. +menu: + influxdb_2_5: + name: Remove a member + parent: Manage members +weight: 203 +--- + +Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI) +to remove a member from an organization. + +{{% note %}} +Removing a member from an organization removes all permissions associated with the organization, +but it does not delete the user from the system entirely. +For information about deleting a user from InfluxDB, see [Delete a user](/influxdb/v2.5/users/delete-user/). +{{% /note %}} + +## Remove a member from an organization in the InfluxDB UI + +1. In the navigation menu on the left, click your **Account avatar** and select **Members**. + + {{< nav-icon "account" >}} + +2. Click the **{{< icon "delete" >}}** icon next to the member you want to delete. +3. Click **Delete** to confirm and remove the user from the organization. + +## Remove a member from an organization using the influx CLI + +Use the [`influx org members remove` command](/influxdb/v2.5/reference/cli/influx/org/members/remove) +to remove a member from an organization. Removing a member requires the following: + +- The organization name or ID _(provided in the output of [`influx org list`](/influxdb/v2.5/reference/cli/influx/org/list/))_ +- The member ID _(provided in the output of [`influx org members list`](/influxdb/v2.5/reference/cli/influx/org/members/list/))_ + +```sh +# Syntax +influx org members remove -o <org-name> -i <user-id> + +# Example +influx org members remove -o 00xXx0x00xXX0000 -i x0xXXXx00x0x000X +``` diff --git a/content/influxdb/v2.5/organizations/members/view-members.md b/content/influxdb/v2.5/organizations/members/view-members.md new file mode 100644 index 000000000..27c4ea126 --- /dev/null +++ b/content/influxdb/v2.5/organizations/members/view-members.md @@ -0,0 +1,35 @@ +--- +title: View members +seotitle: View members of an organization in InfluxDB +description: Review a list of members for an organization. +menu: + influxdb_2_5: + name: View members + parent: Manage members +weight: 202 +--- + +Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI) +to view members of an organization. + +## View members of an organization in the InfluxDB UI + +In the navigation menu on the left, click your **Account avatar** and select **Members**. 
+ +{{< nav-icon "account" >}} + + +## View members of organization using the influx CLI + +Use the [`influx org members list` command](/influxdb/v2.5/reference/cli/influx/org/members/list) +to list members of an organization. Listing an organization's members requires the following: + +- The name or ID of the organization + +```sh +# Syntax +influx org members list -n + +# Example +influx org members list -n my-org +``` diff --git a/content/influxdb/v2.5/organizations/switch-org.md b/content/influxdb/v2.5/organizations/switch-org.md new file mode 100644 index 000000000..573efc185 --- /dev/null +++ b/content/influxdb/v2.5/organizations/switch-org.md @@ -0,0 +1,22 @@ +--- +title: Switch organizations +seotitle: Switch organizations in InfluxDB +description: Switch from one organization to another in the InfluxDB UI +menu: + influxdb_2_5: + name: Switch organizations + parent: Manage organizations +weight: 105 +products: [oss] +--- + +Use the InfluxDB user interface (UI) to switch from one organization to another. The organization you're currently viewing determines what dashboards, tasks, buckets, members, and other assets you can access. + +## Switch organizations in the InfluxDB UI + +1. In the navigation menu on the left, click the **Account dropdown**. + + {{< nav-icon "account" >}} + +2. Select **Switch Organizations**. +3. Click the organization you want to switch to. diff --git a/content/influxdb/v2.5/organizations/update-org.md b/content/influxdb/v2.5/organizations/update-org.md new file mode 100644 index 000000000..afcc1438c --- /dev/null +++ b/content/influxdb/v2.5/organizations/update-org.md @@ -0,0 +1,49 @@ +--- +title: Update an organization +seotitle: Update an organization in InfluxDB +description: Update an organization's name and assets in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: Update an organization + parent: Manage organizations +weight: 103 +--- + +Use the `influx` command line interface (CLI) or the InfluxDB user interface (UI) to update an organization. + +Note that updating an organization's name will affect any assets that reference the organization by name, including the following: + + - Queries + - Dashboards + - Tasks + - Telegraf configurations + - Templates + +If you change an organization name, be sure to update the organization in the above places as well. + +## Update an organization in the InfluxDB UI + +1. In the navigation menu on the left, click the user icon > **About**. + + {{< img-hd src="/img/influxdb/user-icon.png" alt="User Icon" />}} + +2. Click **{{< icon "edit" >}} Rename**. A verification window appears. +3. Review the information, and then click **I understand, let's rename my organization**. +4. Enter a new name for your organization, and then click **Change organization name**. + +## Update an organization using the influx CLI + +Use the [`influx org update` command](/influxdb/v2.5/reference/cli/influx/org/update) +to update an organization. 
Updating an organization requires the following: + +- The org ID _(provided in the output of `influx org list`)_ + +##### Update the name of a organization + +```sh +# Syntax +influx org update -i -n + +# Example +influx org update -i 034ad714fdd6f000 -n my-new-org +``` diff --git a/content/influxdb/v2.5/organizations/view-orgs.md b/content/influxdb/v2.5/organizations/view-orgs.md new file mode 100644 index 000000000..702f160dd --- /dev/null +++ b/content/influxdb/v2.5/organizations/view-orgs.md @@ -0,0 +1,61 @@ +--- +title: View organizations +seotitle: View organizations in InfluxDB +description: Review a list of organizations in InfluxDB using the InfluxDB UI or the influx CLI. +menu: + influxdb_2_5: + name: View organizations + parent: Manage organizations +weight: 102 +--- + +Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI) +to view organizations. + +## View organizations in the InfluxDB UI + +1. In the navigation menu on the left, click the **Account dropdown**. + + {{< nav-icon "account" >}} + +2. Select **Switch Organizations**. The list of organizations appears. + +## View organizations using the influx CLI + +Use the [`influx org list` command](/influxdb/v2.5/reference/cli/influx/org/list) +to view organizations. + +```sh +influx org list +``` + +Filtering options such as filtering by name or ID are available. +See the [`influx org list` documentation](/influxdb/v2.5/reference/cli/influx/org/list) +for information about other available flags. + +## View your organization ID + +Use the InfluxDB UI or `influx` CLI to view your organization ID. + +### Organization ID in the UI + +After logging in to the InfluxDB UI, your organization ID appears in the URL. + +{{< code-callout "03a2bbf46249a000" >}} +```sh +http://localhost:8086/orgs/03a2bbf46249a000/... +``` +{{< /code-callout >}} + + +### Organization ID in the CLI + +Use [`influx org list`](#view-organizations-using-the-influx-cli) to view your organization ID. + +```sh +> influx org list + +ID Name +03a2bbf46249a000 org-1 +03ace3a859669000 org-2 +``` diff --git a/content/influxdb/v2.5/process-data/_index.md b/content/influxdb/v2.5/process-data/_index.md new file mode 100644 index 000000000..6ec815567 --- /dev/null +++ b/content/influxdb/v2.5/process-data/_index.md @@ -0,0 +1,28 @@ +--- +title: Process data with InfluxDB tasks +seotitle: Process data with InfluxDB tasks +description: > + InfluxDB's task engine runs scheduled Flux tasks that process and analyze data. + This collection of articles provides information about creating and managing InfluxDB tasks. +menu: + influxdb_2_5: + name: Process data +weight: 6 +influxdb/v2.5/tags: [tasks] +related: + - /resources/videos/influxdb-tasks/ +--- + +Process and analyze your data with tasks in the InfluxDB **task engine**. +Use tasks (scheduled Flux queries) +to input a data stream and then analyze, modify, and act on the data accordingly. + +Discover how to create and manage tasks using the InfluxDB user interface (UI) +the `influx` command line interface (CLI), and the InfluxDB `/api/v2` API. +Find examples of data downsampling and other common tasks. + +{{% note %}} +Tasks replace InfluxDB v1.x continuous queries. 
+{{% /note %}} + +{{< children >}} diff --git a/content/influxdb/v2.5/process-data/common-tasks/_index.md b/content/influxdb/v2.5/process-data/common-tasks/_index.md new file mode 100644 index 000000000..425773a2c --- /dev/null +++ b/content/influxdb/v2.5/process-data/common-tasks/_index.md @@ -0,0 +1,17 @@ +--- +title: Common data processing tasks +seotitle: Common data processing tasks performed with with InfluxDB +description: > + InfluxDB Tasks process data on specified schedules. + This collection of articles walks through common use cases for InfluxDB tasks. +influxdb/v2.5/tags: [tasks] +menu: + influxdb_2_5: + name: Common tasks + parent: Process data +weight: 104 +--- + +The following articles walk through common task use cases. + +{{< children >}} diff --git a/content/influxdb/v2.5/process-data/common-tasks/calculate_weekly_mean.md b/content/influxdb/v2.5/process-data/common-tasks/calculate_weekly_mean.md new file mode 100644 index 000000000..f34c0a802 --- /dev/null +++ b/content/influxdb/v2.5/process-data/common-tasks/calculate_weekly_mean.md @@ -0,0 +1,51 @@ +--- +title: Calculate a weekly mean +description: > + Calculate a weekly mean and add it to a new bucket. +menu: + influxdb_2_5: + name: Calculate a weekly mean + parent: Common tasks +weight: 202 +influxdb/v2.5/tags: [tasks] +--- + +{{% note %}} +This example uses [NOAA water sample data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data). +{{% /note %}} + +This example calculates a temperature weekly mean and stores it in a separate bucket. + +The sample query performs the following operations: + +- Uses [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) to select records with the `average_temperature` measurement. +- Uses [`range()`](/{{< latest "flux" >}}/stdlib/universe/range/) to define the start time. +- Uses [`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) to group records by week and compute the mean. +- Sends the weekly mean to a new bucket (`weekly_means`). 
+ +```js +option task = { + name: "weekly-means", + every: 1w, +} + +from(bucket: "noaa") + |> filter(fn: (r) => r._measurement == "average_temperature") + |> range(start: 2019-09-01T11:24:00Z) + |> aggregateWindow(every: 1w, fn: mean) + |> to(bucket: "weekly_means") +``` + +### Example results + +| _start | _stop | _field | _measurement | location | _value | _time | +|:------ |:----- |:------ |:------------ |:-------- | ------: |:----- | +| 2019-09-01T11:24:00Z | 2020-10-19T20:39:49Z | degrees | average_temperature | coyote_creek | 80.31005917159763 | 2019-09-05T00:00:00Z | +| 2019-09-01T11:24:00Z | 2020-10-19T20:39:49Z | degrees | average_temperature | coyote_creek | 79.8422619047619 | 2019-09-12T00:00:00Z | +| 2019-09-01T11:24:00Z | 2020-10-19T20:39:49Z | degrees | average_temperature | coyote_creek | 79.82710622710623 | 2019-09-19T00:00:00Z | + +| _start | _stop | _field | _measurement | location | _value | _time | +|:------ |:----- |:------ |:------------ |:-------- | ------: |:----- | +| 2019-09-01T11:24:00Z | 2020-10-19T20:39:49Z | degrees | average_temperature | santa_monica | 80.19952494061758 | 2019-09-05T00:00:00Z | +| 2019-09-01T11:24:00Z | 2020-10-19T20:39:49Z | degrees | average_temperature | santa_monica | 80.01964285714286 | 2019-09-12T00:00:00Z | +| 2019-09-01T11:24:00Z | 2020-10-19T20:39:49Z | degrees | average_temperature | santa_monica | 80.20451 diff --git a/content/influxdb/v2.5/process-data/common-tasks/convert_results_to_json.md b/content/influxdb/v2.5/process-data/common-tasks/convert_results_to_json.md new file mode 100644 index 000000000..6df90b394 --- /dev/null +++ b/content/influxdb/v2.5/process-data/common-tasks/convert_results_to_json.md @@ -0,0 +1,45 @@ +--- +title: Convert results to JSON +seotitle: Convert results to JSON and send them to a URL +description: > + Use `json.encode()` to convert query results to JSON and `http.post()` to send them + to a URL endpoint. +menu: + influxdb_2_5: + name: Convert results to JSON + parent: Common tasks +weight: 203 +influxdb/v2.5/tags: [tasks] +--- +{{% note %}} +This example uses [NOAA water sample data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data). +{{% /note %}} + +Send each record to a URL endpoint using the HTTP POST method. This example uses [`json.encode()`](/{{< latest "flux" >}}/stdlib/json/encode/) to convert a value into JSON bytes, then uses [`http.post()`](/{{< latest "flux" >}}/stdlib/http/post/) to send them to a URL endpoint. + +The following query: + - Uses [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) to filter the `average_temperature` measurement. + - Uses [`mean()`](/{{< latest "flux" >}}/stdlib/universe/mean/) to calculate the average value from results. + - Uses [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to create a new column, `jsonStr`, and build a JSON object using column values from the query. It then byte-encodes the JSON object and stores it as a string in the `jsonStr` column. + - Uses [`http.post()`](/{{< latest "flux" >}}/stdlib/http/post/) to send the `jsonStr` value from each record to an HTTP endpoint. 
+ + +```js +import "http" +import "json" + +from(bucket: "noaa") + |> filter(fn: (r) => r._measurement == "average_temperature") + |> mean() + |> map(fn: (r) => ({r with jsonStr: string(v: json.encode(v: {"location": r.location, "mean": r._value}))})) + |> map( + fn: (r) => ({ + r with + status_code: http.post( + url: "http://somehost.com/", + headers: {x: "a", y: "b"}, + data: bytes(v: r.jsonStr) + ) + }) + ) +``` diff --git a/content/influxdb/v2.5/process-data/common-tasks/downsample-data.md b/content/influxdb/v2.5/process-data/common-tasks/downsample-data.md new file mode 100644 index 000000000..e6012ddea --- /dev/null +++ b/content/influxdb/v2.5/process-data/common-tasks/downsample-data.md @@ -0,0 +1,76 @@ +--- +title: Downsample data with InfluxDB +seotitle: Downsample data in an InfluxDB task +description: > + How to create a task that downsamples data much like continuous queries + in previous versions of InfluxDB. +menu: + influxdb_2_5: + name: Downsample data + parent: Common tasks +weight: 201 +influxdb/v2.5/tags: [tasks] +--- + +One of the most common use cases for InfluxDB tasks is downsampling data to reduce +the overall disk usage as data collects over time. +In previous versions of InfluxDB, continuous queries filled this role. + +This article walks through creating a continuous-query-like task that downsamples +data by aggregating data within windows of time, then storing the aggregate value in a new bucket. + +### Requirements +To perform a downsampling task, you need to the following: + +##### A "source" bucket +The bucket from which data is queried. + +##### A "destination" bucket +A separate bucket where aggregated, downsampled data is stored. + +##### Some type of aggregation +To downsample data, it must be aggregated in some way. +What specific method of aggregation you use depends on your specific use case, +but examples include mean, median, top, bottom, etc. +View [Flux's aggregate functions](/{{< latest "flux" >}}/function-types/#aggregates) +for more information and ideas. + +## Example downsampling task script +The example task script below is a very basic form of data downsampling that does the following: + +1. Defines a task named "cq-mem-data-1w" that runs once a week. +2. Defines a `data` variable that represents all data from the last 2 weeks in the + `mem` measurement of the `system-data` bucket. +3. Uses the [`aggregateWindow()` function](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) + to window the data into 1 hour intervals and calculate the average of each interval. +4. Stores the aggregated data in the `system-data-downsampled` bucket under the + `my-org` organization. + +```js +// Task Options +option task = {name: "cq-mem-data-1w", every: 1w} + +// Defines a data source +data = from(bucket: "system-data") + |> range(start: -duration(v: int(v: task.every) * 2)) + |> filter(fn: (r) => r._measurement == "mem") + +data + // Windows and aggregates the data in to 1h averages + |> aggregateWindow(fn: mean, every: 1h) + // Stores the aggregated data in a new bucket + |> to(bucket: "system-data-downsampled", org: "my-org") +``` + +Again, this is a very basic example, but it should provide you with a foundation +to build more complex downsampling tasks. + +## Add your task +Once your task is ready, see [Create a task](/influxdb/v2.5/process-data/manage-tasks/create-task) for information about adding it to InfluxDB. 
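+
+For example, if you save the example script above to a file, you can register it with the `influx` CLI, as shown in the sketch below (the file path and organization name are placeholders):
+
+```sh
+# Create the downsampling task from a saved Flux script
+influx task create --org my-org -f /tasks/cq-mem-data-1w.flux
+```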
+ +## Things to consider +- If there is a chance that data may arrive late, specify an `offset` in your + task options long enough to account for late-data. +- If running a task against a bucket with a finite retention period, + schedule tasks to run prior to the end of the retention period to let + downsampling tasks complete before data outside of the retention period is dropped. diff --git a/content/influxdb/v2.5/process-data/get-started.md b/content/influxdb/v2.5/process-data/get-started.md new file mode 100644 index 000000000..cbe6c4b1a --- /dev/null +++ b/content/influxdb/v2.5/process-data/get-started.md @@ -0,0 +1,277 @@ +--- +title: Get started with InfluxDB tasks +list_title: Get started with tasks +description: > + Learn the basics of writing an InfluxDB task that processes data, and then performs an action, + such as storing the modified data in a new bucket or sending an alert. +aliases: + - /influxdb/v2.5/process-data/write-a-task/ +influxdb/v2.5/tags: [tasks] +menu: + influxdb_2_5: + name: Get started with tasks + parent: Process data +weight: 101 +related: + - /influxdb/v2.5/process-data/manage-tasks/ + - /influxdb/v2.5/process-data/manage-tasks/create-task/ + - /resources/videos/influxdb-tasks/ +--- + +An **InfluxDB task** is a scheduled Flux script that takes a stream of input data, +modifies or analyzes it in some way, then writes the modified data back to InfluxDB +or performs other actions. + +This article walks through writing a basic InfluxDB task that downsamples +data and stores it in a new bucket. + +## Components of a task + +Every InfluxDB task needs the following components. +Their form and order can vary, but they are all essential parts of a task. + +- [Task options](#define-task-options) +- [A data source](#define-a-data-source) +- [Data processing or transformation](#process-or-transform-your-data) +- [A destination](#define-a-destination) + +_[Skip to the full example task script](#full-example-flux-task-script)_ + +## Define task options + +Task options define the schedule, name, and other information about the task. +The following example shows how to set task options in a Flux script: + +```js +option task = {name: "downsample_5m_precision", every: 1h, offset: 0m} +``` + +_See [Task configuration options](/influxdb/v2.5/process-data/task-options) for detailed information +about each option._ + +_Note that InfluxDB doesn't guarantee that a task will run at the scheduled time. +See [View task run logs for a task](/influxdb/v2.5/process-data/manage-tasks/task-run-history) +for detailed information on task service-level agreements (SLAs)._ + +{{% note %}} +The InfluxDB UI provides a form for defining task options. +{{% /note %}} + + +{{% cloud-only %}} + +### Task options for invokable scripts + +Use the InfluxDB Cloud API to create tasks that reference and run [invokable scripts](influxdb/cloud/api-guide/api-invokable-scripts/). +When you create or update the task, pass task options as properties in the request body--for example: + +```json + { + "name": "30-day-avg-temp", + "description": "IoT Center 30d environment average.", + "every": "1d", + "offset": "0m" + ... + } +``` + +To learn more about creating tasks that run invokable scripts, see how to [create a task that references a script](/influxdb/cloud/process-data/manage-tasks/create-task/#create-a-task-that-references-a-script). 
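+
+The following sketch shows one way to send these task options to the `/api/v2/tasks` endpoint with `curl`; the API token and script ID are placeholders:
+
+```sh
+curl --request POST 'https://cloud2.influxdata.com/api/v2/tasks' \
+  --header 'Content-Type: application/json' \
+  --header 'Authorization: Token INFLUX_API_TOKEN' \
+  --data '{
+    "name": "30-day-avg-temp",
+    "description": "IoT Center 30d environment average.",
+    "every": "1d",
+    "offset": "0m",
+    "scriptID": "SCRIPT_ID"
+  }'
+```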
+ +{{% /cloud-only %}} + +## Retrieve and filter data + +A minimal Flux script uses the following functions to retrieve a specified amount +of data from a data source +and then filter the data based on time or column values: + +1. [`from()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from/): + queries data from InfluxDB {{% cloud-only %}}Cloud{{% /cloud-only %}}. +2. [`range()`](/{{< latest "flux" >}}/stdlib/universe/range/): defines the time + range to return data from. +3. [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/): filters + data based on column values. + +The following sample Flux retrieves data from an InfluxDB bucket and then filters by +the `_measurement` and `host` columns: + +```js +from(bucket: "example-bucket") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "mem" and r.host == "myHost") +``` + +_To retrieve data from other sources, see [Flux input functions](/{{< latest "flux" >}}/function-types/#inputs)._ + +{{% note %}} + +#### Use task options in your Flux script + +InfluxDB stores options in a `task` option record that you can reference in your Flux script. +The following sample Flux uses the time range `-task.every`: + +```js +from(bucket: "example-bucket") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "mem" and r.host == "myHost") +``` + +`task.every` is dot notation that references the `every` property of the `task` option record. +`every` is defined as `1h`, therefore `-task.every` equates to `-1h`. + +Using task options to define values in your Flux script can make reusing your task easier. +{{% /note %}} + +## Process or transform your data + +Tasks run scripts automatically at regular intervals. +Scripts process or transform data in some way--for example: downsampling, detecting +anomalies, or sending notifications. + +Consider a task that runs hourly and downsamples data by calculating the average of set intervals. +It uses [`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +to group points into 5-minute (`5m`) windows and calculate the average of each +window with [`mean()`](/{{< latest "flux" >}}/stdlib/universe/mean/). + +The following sample code shows the Flux script with task options: + +```js +option task = {name: "downsample_5m_precision", every: 1h, offset: 0m} + +from(bucket: "example-bucket") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "mem" and r.host == "myHost") + |> aggregateWindow(every: 5m, fn: mean) +``` + +{{% note %}} +#### Use offset to account for latent data + +Use the `offset` task option to account for potentially latent data (like data from edge devices). +A task that runs at one hour intervals (`every: 1h`) with an offset of five minutes (`offset: 5m`) +executes 5 minutes after the hour, but queries data from the original one-hour interval. +{{% /note %}} + +_See [Common tasks](/influxdb/v2.5/process-data/common-tasks) for examples of tasks commonly used with InfluxDB._ + +{{% cloud-only %}} + +### Process data with invokable scripts + +In InfluxDB Cloud, you can create tasks that run invokable scripts. +You can use invokable scripts to manage and reuse scripts for your organization. +You can use tasks to schedule script runs with options and parameters. 
+ +The following sample `POST /api/v2/scripts` request body defines a new invokable script with the Flux from the previous example: + +```json +{ + "name": "aggregate-intervals", + "description": "Group points into 5 minute windows and calculate the average of each + window.", + "script": "from(bucket: "example-bucket")\ + |> range(start: -task.every)\ + |> filter(fn: (r) => r._measurement == "mem" and r.host == "myHost")\ + |> aggregateWindow(every: 5m, fn: mean)", + "language": "flux" +} +``` + +Note that the script doesn't contain task options. +Once you create the invokable script, you can use `POST /api/v2/tasks` to create a task that runs the script. +The following sample request body defines a task with the script ID and options: + +```json +{ + "every": "1h", + "description": "Downsample host with 5 min precision.", + "name": "downsample_5m_precision", + "scriptID": "09b2136232083000" +} +``` + +To create a script and a task that use parameters, see how to [create a task to run an invokable script](/influxdb/cloud/process-data/manage-tasks/create-task/). + +{{% /cloud-only %}} + +## Define a destination + +In most cases, you'll want to send and store data after the task has transformed it. +The destination could be a separate InfluxDB measurement or bucket. + +The example below uses [`to()`](/{{< latest "flux" >}}/stdlib/universe/to) +to write the transformed data back to another InfluxDB bucket: + +```js +// ... + |> to(bucket: "example-downsampled", org: "my-org") +``` + +To write data into InfluxDB, `to()` requires the following columns: + +- `_time` +- `_measurement` +- `_field` +- `_value` + +_To write data to other destinations, see +[Flux output functions](/{{< latest "flux" >}}/function-types/#outputs)._ + +## Full example Flux task script + +The following sample Flux combines all the components described in this guide: + +```js +// Task options +option task = {name: "downsample_5m_precision", every: 1h, offset: 0m} + +// Data source +from(bucket: "example-bucket") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "mem" and r.host == "myHost") + // Data processing + |> aggregateWindow(every: 5m, fn: mean) + // Data destination + |> to(bucket: "example-downsampled") +``` + +{{% cloud-only %}} + +## Full example task with invokable script + +The following sample code shows a `POST /api/v2/scripts` request body that +combines the components described in this guide: + +```json +{ + "name": "aggregate-intervals-and-export", + "description": "Group points into 5 minute windows and calculate the average of each + window.", + "script": "from(bucket: "example-bucket")\ + |> range(start: -task.every)\ + |> filter(fn: (r) => r._measurement == "mem" and r.host == "myHost")\ + // Data processing\ + |> aggregateWindow(every: 5m, fn: mean)\ + // Data destination\ + |> to(bucket: "example-downsampled")", + "language": "flux" +} +``` + +The following sample code shows a `POST /api/v2/tasks` request body to +schedule the script: + +```json +{ + "every": "1h", + "description": "Downsample host with 5 min precision.", + "name": "downsample_5m_precision", + "scriptID": "SCRIPT_ID" +} +``` + +{{% /cloud-only %}} + +To learn more about InfluxDB tasks and how they work, watch the following video: + +{{< youtube zgCmdtZaH9M >}} diff --git a/content/influxdb/v2.5/process-data/manage-tasks/_index.md b/content/influxdb/v2.5/process-data/manage-tasks/_index.md new file mode 100644 index 000000000..e31519387 --- /dev/null +++ 
b/content/influxdb/v2.5/process-data/manage-tasks/_index.md @@ -0,0 +1,20 @@ +--- +title: Manage tasks in InfluxDB +seotitle: Manage data processing tasks in InfluxDB +list_title: Manage tasks +description: > + InfluxDB provides options for creating, reading, updating, and deleting tasks + using the `influx` CLI, the InfluxDB UI, and the InfluxDB API. +influxdb/v2.5/tags: [tasks] +menu: + influxdb_2_5: + name: Manage tasks + parent: Process data +weight: 102 +--- + +InfluxDB provides multiple options for creating, reading, updating, and deleting (CRUD) tasks. +The following articles walk through managing tasks with the +InfluxDB user interface (UI), the `influx` command line interface (CLI), and the InfluxDB API. + +{{< children >}} diff --git a/content/influxdb/v2.5/process-data/manage-tasks/create-task.md b/content/influxdb/v2.5/process-data/manage-tasks/create-task.md new file mode 100644 index 000000000..27210beba --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/create-task.md @@ -0,0 +1,303 @@ +--- +title: Create a task +seotitle: Create a task for processing data in InfluxDB +description: > + Create a data processing task in InfluxDB using the InfluxDB UI or the `influx` CLI. +menu: + influxdb_2_5: + name: Create a task + parent: Manage tasks +weight: 201 +related: + - /influxdb/v2.5/reference/cli/influx/task/create +--- + +Create tasks with the InfluxDB user interface (UI), `influx` command line interface (CLI), or `/api/v2` API. + +_Before creating a task, review the [basics for writing a task](/influxdb/v2.5/process-data/get-started)._ + +- [InfluxDB UI](#create-a-task-in-the-influxdb-ui) +- [`influx` CLI](#create-a-task-using-the-influx-cli) +- [InfluxDB API](#create-a-task-using-the-influxdb-api) + +## Create a task in the InfluxDB UI + +The InfluxDB UI provides multiple ways to create a task: + +- [Create a task from the Data Explorer](#create-a-task-from-the-data-explorer) +- [Create a task in the Task UI](#create-a-task-in-the-task-ui) +- [Import a task](#import-a-task) +- [Create a task from a template](#create-a-task-from-a-template) +- [Clone a task](#clone-a-task) + +### Create a task from the Data Explorer + +1. In the navigation menu on the left, select **Data Explorer**. + + {{< nav-icon "data-explorer" >}} + +2. Build a query and click **Save As** in the upper right. +3. Select the **{{< caps >}}Task{{< /caps >}}** heading. +4. Specify the task options. See [Task options](/influxdb/v2.5/process-data/task-options) + for detailed information about each option. +5. Click **{{< caps >}}Save as Task{{< /caps >}}**. + +### Create a task in the Task UI + +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. Click **{{< caps >}}{{< icon "plus" >}} Create Task{{< /caps >}}** in the upper right. +3. In the left panel, specify the task options. + See [Task options](/influxdb/v2.5/process-data/task-options) for detailed information about each option. +4. In the right panel, enter your task script. + + {{% note %}} + +##### Leave out the option tasks assignment + +When creating a _new_ task in the InfluxDB Task UI, leave the code editor empty. +When you save the task, the Task UI uses the [task options](/influxdb/v2.5/process-data/task-options/) you specify in the **Task options** form to populate `option task = {task_options}` for you. + +When you edit the saved task, you'll see the injected `option task = {task_options}`. + {{% /note %}} + +7. Click **Save** in the upper right. + +### Import a task + +1. 
In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. Click **{{< caps >}}{{< icon "plus" >}} Create Task{{< /caps >}}** in the upper right. +3. In the left panel, specify the task options. + See [Task options](/influxdb/v2.5/process-data/task-options) for detailed information about each option. +4. Paste a raw Flux task in the code editor to the right of the task options fields. +5. Click **{{< caps >}}Save{{< /caps >}}** in the upper right. + +### Create a task from a template + +1. In the navigation menu on the left, select **Settings** > **Templates**. + + {{< nav-icon "Settings" >}} + +2. Find the template you want to use and click its **Resources** list to expand the list of resources. +3. In the **Resources** list, click the task you want to use. + +### Clone a task + +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. Find the task you would like to clone and click the **{{< icon "settings" >}}** icon located far right of the task name. +3. Click **Clone**. + +## Create a task using the influx CLI + +Use the `influx task create` command to create a new task. +It accepts either a file path or raw Flux. + +### Create a task using a file + +```sh +# Syntax +influx task create --org -f + +# Example +influx task create --org my-org -f /tasks/cq-mean-1h.flux +``` + +### Create a task using raw Flux + +```sh +influx task create --org my-org - # to open stdin pipe + +option task = { + name: "task-name", + every: 6h +} + +# ... Task script ... + +# to close the pipe and submit the command +``` + +## Create a task using the InfluxDB API + +{{% oss-only %}} +Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/v2.5/api/#operation/PostTasks) to create a task. + +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/" >}} + +Provide the following in your API request: +##### Request headers + +- **Content-Type**: application/json +- **Authorization**: Token *`INFLUX_API_TOKEN`* + +##### Request body + +JSON object with the following fields: + +- **flux**: raw Flux task string that contains a [`task` option](/flux/v0.x/spec/options/) and a query. +- **orgID**: your [InfluxDB organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) +- **status**: task status ("active" or "inactive") +- **description**: task description + +```sh +curl --request POST 'http://localhost:8086/api/v2/tasks' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Token INFLUX_API_TOKEN' \ + --data-raw '{ + "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\n\nfrom(bucket: \"telegraf\")\n\t|> range(start: -1h)\n\t|> filter(fn: (r) =>\n\t\t(r._measurement == \"cpu\"))\n\t|> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\"))\n\t|> filter(fn: (r) =>\n\t\t(r.cpu == \"cpu-total\"))\n\t|> aggregateWindow(every: 1h, fn: max)\n\t|> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")", + "orgID": "INFLUX_ORG_ID", + "status": "active", + "description": "This task downsamples CPU data every hour" +}' +``` + +{{% /oss-only %}} + +{{% cloud-only %}} + +An InfluxDB Cloud task can run either an [invokable script](/influxdb/cloud/api-guide/api-invokable-scripts/) or raw Flux stored in the task. 
+ +- [Create a task that references a script](#create-a-task-that-references-a-script) +- [Create a task that contains a Flux script](#create-a-task-that-contains-a-flux-script) + +### Create a task that references a script + +With InfluxDB Cloud invokable scripts, you can manage, reuse, and invoke scripts as API endpoints. +You can use tasks to pass script parameters and schedule runs. + +Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/cloud/api/#operation/PostTasks) to create a task +that references a script ID. + +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/" >}} + +Provide the following in your API request: + +#### Request headers + +- **Content-Type**: application/json +- **Authorization**: Token *`INFLUX_API_TOKEN`* + +#### Request body + +JSON object with the following fields: + +- **cron** or **every**: task schedule +- **name**: task name +- **scriptID**: [invokable script](/influxdb/cloud/api-guide/api-invokable-scripts/) ID + +```sh +curl --request POST 'https://cloud2.influxdata.com/api/v2/tasks' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Token INFLUX_API_TOKEN' \ + "cron": "0 * * * *", + "name": "downsample cpu", + "scriptID": "085a2960eaa20000", + "description": "This task downsamples CPU data every hour" +}' +``` + +To create a task that passes parameters when invoking the script, pass the _`scriptParameters`_ +property in the request body. +The following sample code creates a script with parameters, and then creates a +task to run the new script daily: + +```sh +SCRIPT_ID=$( +curl https://cloud2.influxdata.com/api/v2/scripts \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --header 'Accept: application/json' \ + --header 'Content-Type: application/json' \ + --data-binary @- << EOF | jq -r '.id' + { + "name": "filter-and-group19", + "description": "Returns filtered and grouped points from a bucket.", + "script": "from(bucket: params.bucket)\ + |> range(start: duration(v: params.rangeStart))\ + |> filter(fn: (r) => r._field == params.filterField)\ + |> group(columns: [params.groupColumn])", + "language": "flux" + } +EOF +) + +echo $SCRIPT_ID + +curl https://cloud2.influxdata.com/api/v2/tasks \ +--header "Content-type: application/json" \ +--header "Authorization: Token INFLUX_API_TOKEN" \ +--data @- << EOF + { + "name": "30-day-avg-temp", + "description": "IoT Center 30d temperature average.", + "every": "1d", + "scriptID": "${SCRIPT_ID}", + "scriptParameters": + { + "rangeStart": "-30d", + "bucket": "air_sensor", + "filterField": "temperature", + "groupColumn": "_time" + } + } +EOF +``` + +Replace **`INFLUX_API_TOKEN`** with your InfluxDB API token. + +### Create a task that contains a Flux script + +Use the [`/api/v2/tasks` InfluxDB API endpoint](/influxdb/cloud/api/#operation/PostTasks) to create a task that contains a Flux script with task options. + +{{< api-endpoint method="POST" endpoint="https://cloud2.influxdata.com/api/v2/tasks/" >}} + +Provide the following in your API request: + +#### Request headers + +- **Content-Type**: application/json +- **Authorization**: Token **`INFLUX_API_TOKEN`** + +#### Request body + +JSON object with the following fields: + +- **flux**: raw Flux task string that contains [`options`](/flux/v0.x/spec/options/) and the query. 
+- **status**: task status ("active" or "inactive") +- **description**: task description + +```sh +curl --request POST 'https://cloud2.influxdata.com/api/v2/tasks' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Token INFLUX_API_TOKEN' \ + --data-binary @- << EOF + { + "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\ + from(bucket: \"telegraf\") + |> range(start: -1h) + |> filter(fn: (r) => (r._measurement == \"cpu\")) + |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\")) + |> filter(fn: (r) => (r.cpu == \"cpu-total\")) + |> aggregateWindow(every: 1h, fn: max) + |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")", + "orgID": "INFLUX_ORG_ID", + "status": "active", + "description": "This task downsamples CPU data every hour" + } +EOF +``` + +Replace the following: + +- **`INFLUX_API_TOKEN`**: your InfluxDB [API token](/influxdb/cloud/security/tokens/view-tokens/) +- **`INFLUX_ORG`**: your InfluxDB organization name +- **`INFLUX_ORG_ID`**: your InfluxDB organization ID + +{{% /cloud-only %}} diff --git a/content/influxdb/v2.5/process-data/manage-tasks/delete-task.md b/content/influxdb/v2.5/process-data/manage-tasks/delete-task.md new file mode 100644 index 000000000..5facda43d --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/delete-task.md @@ -0,0 +1,48 @@ +--- +title: Delete a task +seotitle: Delete a task for processing data in InfluxDB +description: > + Delete a task from InfluxDB using the InfluxDB UI or the `influx` CLI. +menu: + influxdb_2_5: + name: Delete a task + parent: Manage tasks +weight: 206 +related: + - /influxdb/v2.5/reference/cli/influx/task/delete +--- + +## Delete a task in the InfluxDB UI +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. In the list of tasks, hover over the task you want to delete. +3. Click **Delete** on the far right. +4. Click **Confirm**. + +## Delete a task with the influx CLI +Use the `influx task delete` command to delete a task. + +```sh +# Syntax +influx task delete -i + +# Example +influx task delete -i 0343698431c35000 +``` + +_To find the task ID, see [how to view tasks](/influxdb/v2.5/process-data/manage-tasks/view-tasks/)_ + +## Delete a task using the InfluxDB API + +Use the [`/tasks/TASK_ID` InfluxDB API endpoint](/influxdb/v2.5/api/#operation/DeleteTasksID) to delete a task and all associated records (task runs, logs, and labels). + +{{< api-endpoint method="DELETE" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID" >}} + +_To find the task ID, see [how to view tasks](/influxdb/v2.5/process-data/manage-tasks/view-tasks/)_ + +Once the task is deleted, InfluxDB cancels all scheduled runs of the task. + +If you want to disable a task instead of delete it, see how to +[update the task status](/influxdb/v2.5/process-data/manage-tasks/update-task/) to `inactive`. diff --git a/content/influxdb/v2.5/process-data/manage-tasks/export-task.md b/content/influxdb/v2.5/process-data/manage-tasks/export-task.md new file mode 100644 index 000000000..80e3eeebb --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/export-task.md @@ -0,0 +1,26 @@ +--- +title: Export a task +seotitle: Export an InfluxDB task +description: Export a data processing task from InfluxDB using the InfluxDB UI. +menu: + influxdb_2_5: + name: Export a task + parent: Manage tasks +weight: 205 +--- + +InfluxDB lets you export tasks from the InfluxDB user interface (UI). +Tasks are exported as downloadable JSON files. 
+ +## Export a task in the InfluxDB UI +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. In the list of tasks, hover over the task you would like to export and click + the **{{< icon "gear" >}}** icon that appears. +3. Select **Export**. +4. Downloading or save the task export file using one of the following options: + - Click **Download JSON** to download the exported JSON file. + - Click **Save as template** to save the export file as a task template. + - Click **Copy to Clipboard** to copy the raw JSON content to your machine's clipboard. diff --git a/content/influxdb/v2.5/process-data/manage-tasks/run-task.md b/content/influxdb/v2.5/process-data/manage-tasks/run-task.md new file mode 100644 index 000000000..486dffa5d --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/run-task.md @@ -0,0 +1,81 @@ +--- +title: Run a task +seotitle: Run an InfluxDB task +description: > + Run a data processing task using the InfluxDB UI or the `influx` CLI. +menu: + influxdb_2_5: + name: Run a task + parent: Manage tasks +weight: 203 +related: + - /influxdb/v2.5/reference/cli/influx/task/run + - /influxdb/v2.5/reference/cli/influx/task/run/retry + - /influxdb/v2.5/reference/cli/influx/task/retry-failed + - /influxdb/v2.5/api/#operation/PostTasksIDRuns + - /influxdb/v2.5/api/#operation/PostTasksIDRunsIDRetry +--- + +InfluxDB data processing tasks generally run in defined intervals or at a specific time, +however, you can manually run a task from the InfluxDB user interface (UI), +the `influx` command line interface (CLI), +or the InfluxDB `/api/v2` API. + +## Run a task from the InfluxDB UI +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. Hover over the task you want to run and click the **{{< icon "gear" >}}** icon. +3. Select **Run Task**. + +## Run a task with the influx CLI +Use the `influx task run retry` command to run a task. + +{{% note %}} +To run a task from the `influx` CLI, the task must have already run at least once. +{{% /note %}} + +{{< cli/influx-creds-note >}} + +```sh +# List all tasks to find the ID of the task to run +influx task list + +# Use the task ID to list previous runs of the task +influx task run list --task-id=0000000000000000 + +# Use the task ID and run ID to retry a run +influx task run retry --task-id=0000000000000000 --run-id=0000000000000000 +``` + +### Retry failed task runs +Use the [`influx task retry-failed` command](/influxdb/v2.5/reference/cli/influx/task/retry-failed/) +to retry failed task runs. + +```sh +# Retry failed tasks for a specific task +influx task retry-failed \ + --id 0000000000000000 + +# Print information about runs that will be retried +influx task retry-failed \ + --dry-run + +# Retry failed task runs that occurred in a specific time range +influx task retry-failed \ + --after 2021-01-01T00:00:00Z \ + --before 2021-01-01T23:59:59Z +``` + +## Run a task with the InfluxDB API +Use the [`/tasks/TASK_ID/runs` +InfluxDB API endpoint](/influxdb/v2.5/api/#operation/PostTasksIDRuns) to manually start a task run. + +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs" >}} + +### Retry failed task runs +Use the [`/tasks/TASK_ID/runs/RUN_ID/retry` +InfluxDB API endpoint](/influxdb/v2.5/api/#operation/PostTasksIDRunsIDRetry) to retry a task run. 
+ +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/retry" >}} diff --git a/content/influxdb/v2.5/process-data/manage-tasks/task-run-history.md b/content/influxdb/v2.5/process-data/manage-tasks/task-run-history.md new file mode 100644 index 000000000..d96dadea3 --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/task-run-history.md @@ -0,0 +1,86 @@ +--- +title: View task run history and logs +description: > + View task run histories and logs using the InfluxDB UI or the `influx` CLI. +menu: + influxdb_2_5: + name: View run history + parent: Manage tasks +weight: 203 +related: + - /influxdb/v2.5/reference/cli/influx/task/list + - /influxdb/v2.5/reference/cli/influx/task/run/list + - /influxdb/v2.5/reference/cli/influx/task/retry-failed +--- + +When an InfluxDB task runs, a _run_ record is created in the task's history. +Logs associated with each run provide relevant log messages, timestamps, +and the exit status of the run attempt. + +Use the InfluxDB user interface (UI), the `influx` command line interface (CLI), +or the InfluxDB `/api/v2` API to view task run histories and associated logs. + +{{% warn %}} +InfluxDB doesn’t guarantee that a task will run at the scheduled time. During busy +periods, tasks are added to the run queue and processed in order of submission. +The scheduled start time and actual start time can be viewed in the logs under +`scheduledFor` and `startedAt`. + +Task execution time doesn't affect the time range queried. Tasks will query +over the set time range as if executed on schedule regardless of delay. +{{% /warn %}} + +## View a task's run history in the InfluxDB UI + +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" >}} + +2. Hover over the task you want to run and click the **{{< icon "gear" >}}** icon. +3. Select **View Task Runs**. + +### View task run logs + +To view logs associated with a run, click **View Logs** next to the run in the task's run history. + +## View a task's run history with the influx CLI + +Use the `influx task run list` command to view a task's run history. + +```sh +# List all tasks to find the ID of the task to run +influx task list + +# Use the task ID to view the run history of a task +influx task run list --task-id=0000000000000000 +``` + +{{% note %}} +Detailed run logs are not currently available in the `influx` CLI. +{{% /note %}} + +To retry failed task runs, see how to [run tasks](/influxdb/v2.5/process-data/manage-tasks/run-task/). + +## View logs for a task with the InfluxDB API + +Use the [`/api/v2/tasks/TASK_ID/logs` +InfluxDB API endpoint](/influxdb/v2.5/api/#operation/GetTasksIDLogs) to view the log events for a task and exclude additional task metadata. + +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/logs" >}} + +## View a task's run history with the InfluxDB API + +Use the [`/tasks/TASK_ID/runs` +InfluxDB API endpoint](/influxdb/v2.5/api/#operation/GetTasksIDRuns) to view a task's run history. + +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/{taskID}/runs" >}} + +### View task run logs with the InfluxDB API + +To view logs associated with a run, use the +[`/api/v2/tasks/TASK_ID/runs/RUN_ID/logs` InfluxDB API +endpoint](/influxdb/v2.5/api/#operation/GetTasksIDRunsIDLogs). 
+ +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID/runs/RUN_ID/logs" >}} + +To retry failed task runs, see how to [run tasks](/influxdb/v2.5/process-data/manage-tasks/run-task/). diff --git a/content/influxdb/v2.5/process-data/manage-tasks/update-task.md b/content/influxdb/v2.5/process-data/manage-tasks/update-task.md new file mode 100644 index 000000000..d254919f4 --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/update-task.md @@ -0,0 +1,87 @@ +--- +title: Update a task +seotitle: Update a task for processing data in InfluxDB +description: > + Update a data processing task in InfluxDB using the InfluxDB UI or the `influx` CLI. +menu: + influxdb_2_5: + name: Update a task + parent: Manage tasks +weight: 204 +related: + - /influxdb/v2.5/reference/cli/influx/task/update +--- + +## Update a task in the InfluxDB UI +1. In the navigation menu on the left, select **Tasks**. + + {{< nav-icon "tasks" "v2" >}} + +2. Find the task you would like to edit and click the **{{< icon "settings" >}}** icon located far right of the task name. +3. Click **Edit**. +4. Click **{{< caps >}}Save{{< /caps >}}** in the upper right. + +#### Update a task Flux script +1. In the list of tasks, click the **Name** of the task you want to update. +2. In the left panel, modify the task options. +3. In the right panel, modify the task script. +4. Click **{{< caps >}}Save{{< /caps >}}** in the upper right. + +#### Update the status of a task +In the list of tasks, click the {{< icon "toggle" >}} toggle to the left of the +task you want to activate or inactivate. + +#### Update a task description +1. In the list of tasks, hover over the name of the task you want to update. +2. Click the pencil icon {{< icon "pencil" >}}. +3. Click outside of the field or press `RETURN` to update. + +## Update a task with the influx CLI +Use the `influx task update` command to update or change the status of an existing task. + +_This command requires a task ID, which is available in the output of `influx task list`._ + +#### Update a task Flux script +Pass the file path of your updated Flux script to the `influx task update` command +with the ID of the task you want to update. +Modified [task options](/influxdb/v2.5/process-data/task-options) defined in the Flux +script are also updated. + +```sh +# Syntax +influx task update -i -f +``` + +```sh +# Example +influx task update -i 0343698431c35000 -f /tasks/cq-mean-1h.flux +``` + +#### Update the status of a task +Pass the ID of the task you want to update to the `influx task update` +command with the `--status` flag. + +_Possible arguments of the `--status` flag are `active` or `inactive`._ + +```sh +# Syntax +influx task update -i --status < active | inactive > +``` + +```sh +# Example +influx task update -i 0343698431c35000 --status inactive +``` + +## Update a task with the InfluxDB API +Use the [`/tasks/TASK_ID` +InfluxDB API endpoint](/influxdb/v2.5/api/#operation/PatchTasksID) to update properties of a task. + +{{< api-endpoint method="PATCH" endpoint="http://localhost:8086/api/v2/tasks/TASK_ID" >}} + +In your request, pass the task ID and an object that contains the updated key-value pairs. +To activate or inactivate a task, set the `status` property. +`"status": "inactive"` cancels scheduled runs and prevents manual runs of the task. +_To find the task ID, see [how to view tasks](/influxdb/v2.5/process-data/manage-tasks/view-tasks/)._ + +Once InfluxDB applies the update, it cancels all previously scheduled runs of the task. 
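+
+For example, the following is a minimal sketch of deactivating a task with `curl`; replace `TASK_ID` and `INFLUX_API_TOKEN` with your own values:
+
+```sh
+curl --request PATCH 'http://localhost:8086/api/v2/tasks/TASK_ID' \
+  --header 'Content-Type: application/json' \
+  --header 'Authorization: Token INFLUX_API_TOKEN' \
+  --data '{"status": "inactive"}'
+```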
diff --git a/content/influxdb/v2.5/process-data/manage-tasks/view-tasks.md b/content/influxdb/v2.5/process-data/manage-tasks/view-tasks.md new file mode 100644 index 000000000..3d0ac23ba --- /dev/null +++ b/content/influxdb/v2.5/process-data/manage-tasks/view-tasks.md @@ -0,0 +1,44 @@ +--- +title: View tasks +seotitle: View created tasks that process data in InfluxDB +description: > + View existing data processing tasks using the InfluxDB UI or the `influx` CLI. +menu: + influxdb_2_5: + name: View tasks + parent: Manage tasks +weight: 202 +related: + - /influxdb/v2.5/reference/cli/influx/task/list +--- + +## View tasks in the InfluxDB UI +Click the **Tasks** icon in the left navigation to view the lists of tasks. + +{{< nav-icon "tasks" >}} + +### Filter the list of tasks + +1. Click the **Show Inactive** {{< icon "toggle" >}} toggle to include or exclude + inactive tasks in the list. +2. Enter text in the **Filter tasks** field to search for tasks by name or label. +3. Click the heading of any column to sort by that field. + +## View tasks with the influx CLI +Use the `influx task list` command to return a list of tasks. + +```sh +influx task list +``` + +#### Filter tasks using the CLI +Other filtering options such as filtering by organization or user, +or limiting the number of tasks returned, are available. +See the [`influx task list` documentation](/influxdb/v2.5/reference/cli/influx/task/list) +for information about other available flags. + +## View tasks with the InfluxDB API +Use the [`/tasks` InfluxDB API endpoint](/influxdb/v2.5/api/#operation/GetTasks) +to return a list of tasks. + +{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/tasks" >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/process-data/task-options.md b/content/influxdb/v2.5/process-data/task-options.md new file mode 100644 index 000000000..82a14fec4 --- /dev/null +++ b/content/influxdb/v2.5/process-data/task-options.md @@ -0,0 +1,156 @@ +--- +title: Task configuration options +seotitle: InfluxDB task configuration options +description: > + Task options define specific information about a task such as its name, + the schedule on which it runs, execution delays, and others. +menu: + influxdb_2_5: + name: Task options + parent: Process data +weight: 105 +influxdb/v2.5/tags: [tasks, flux] +--- + +Task options define specific information about a task. +They are set in a Flux script {{% cloud-only %}}, in the InfluxDB API, {{% /cloud-only %}} or in the InfluxDB user interface (UI). +The following task options are available: + +- [name](#name) +- [every](#every) +- [cron](#cron) +- [offset](#offset) + +{{% note %}} +`every` and `cron` are mutually exclusive, but at least one is required. +{{% /note %}} + +## name + +The name of the task. _**Required**_. + +_**Data type:** String_ + +In Flux: + +```js +option task = { + name: "taskName", + // ... +} +``` + +{{% cloud-only %}} +In a `/api/v2/tasks` request body with `scriptID`: + +```json +{ + "scriptID": "SCRIPT_ID", + "name": "TASK_NAME" + ... +} +``` + +Replace `SCRIPT_ID` with the ID of your InfluxDB invokable script. +{{% /cloud-only %}} + +## every + +The interval at which the task runs. This option also determines when the task first starts to run, depending on the specified time (in [duration literal](/{{< latest "flux" >}}/spec/lexical-elements/#duration-literals)). 
+ +_**Data type:** Duration_ + +For example, if you save or schedule a task at 2:30 and run the task every hour (`1h`): + +`option task = {name: "aggregation", every: 1h}` + +The task first executes at 3:00pm, and subsequently every hour after that. + +In Flux: + +```js +option task = { + // ... + every: 1h, +} +``` + +{{% cloud-only %}} +In a `/api/v2/tasks` request body with `scriptID`: + +```json +{ + "scriptID": "SCRIPT_ID", + "every": "1h" + ... +} +``` + +{{% /cloud-only %}} + +{{% note %}} +In the InfluxDB UI, use the **Interval** field to set this option. +{{% /note %}} + +## cron + +The [cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that +defines the schedule on which the task runs. +Cron scheduling is based on system time. + +_**Data type:** String_ + +In Flux: + +```js +option task = { + // ... + cron: "0 * * * *", +} +``` + +{{% cloud-only %}} +In a `/api/v2/tasks` request body with `scriptID`: + +```json +{ + "scriptID": "SCRIPT_ID", + "cron": "0 * * * *", + ... +} +``` + +{{% /cloud-only %}} + +## offset + +Delays the execution of the task but preserves the original time range. +For example, if a task is to run on the hour, a `10m` offset will delay it to 10 +minutes after the hour, but all time ranges defined in the task are relative to +the specified execution time. +A common use case is offsetting execution to account for data that may arrive late. + +_**Data type:** Duration_ + +In Flux: + +```js +option task = { + // ... + offset: 10m, +} +``` + +{{% cloud-only %}} + +In a `/api/v2/tasks` request body with `scriptID`: + +```json +{ + "scriptID": "SCRIPT_ID", + "offset": "10m", + ... +} +``` + +{{% /cloud-only %}} \ No newline at end of file diff --git a/content/influxdb/v2.5/query-data/_index.md b/content/influxdb/v2.5/query-data/_index.md new file mode 100644 index 000000000..fbd7faa14 --- /dev/null +++ b/content/influxdb/v2.5/query-data/_index.md @@ -0,0 +1,21 @@ +--- +title: Query data in InfluxDB +seotitle: Query data stored in InfluxDB +description: > + Learn to query data stored in InfluxDB using Flux and tools such as the InfluxDB + user interface and the 'influx' command line interface. +aliases: + - /influxdb/v2.5/query_language/data_exploration/ +menu: + influxdb_2_5: + name: Query data +weight: 5 +influxdb/v2.5/tags: [query, flux] +--- + +Learn to query data stored in InfluxDB using Flux and tools such as the InfluxDB +user interface and the 'influx' command line interface. + +{{< children >}} + +{{< influxdbu "influxdb-101" >}} diff --git a/content/influxdb/v2.5/query-data/common-queries/_index.md b/content/influxdb/v2.5/query-data/common-queries/_index.md new file mode 100644 index 000000000..36caab5cc --- /dev/null +++ b/content/influxdb/v2.5/query-data/common-queries/_index.md @@ -0,0 +1,22 @@ +--- +title: Common queries +seotitle: Common queries with Flux +description: > + This collection of articles walks through common use cases for Flux queries. +influxdb/v2.5/tags: [queries] +menu: + influxdb_2_5: + name: Common queries + parent: Query data +weight: 104 +--- + +The following articles walk through common queries using the +[NOAA water database data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data). + +{{< children >}} + +{{% note %}} +This list will continue to grow. +If you have suggestions, please [submit them to the InfluxData Community](https://community.influxdata.com/c/influxdb2). 
+{{% /note %}} diff --git a/content/influxdb/v2.5/query-data/common-queries/compare-values.md b/content/influxdb/v2.5/query-data/common-queries/compare-values.md new file mode 100644 index 000000000..367782248 --- /dev/null +++ b/content/influxdb/v2.5/query-data/common-queries/compare-values.md @@ -0,0 +1,48 @@ +--- +title: Comparing values from different buckets +seotitle: Compare the last measurement to a mean stored in another bucket +description: > + Compare the value from the latest point to an average value stored in another bucket. This is useful when using the average value to calculate a threshold check. +influxdb/v2.5/tags: [queries] +menu: + influxdb_2_5: + name: Compare values from different buckets + parent: Common queries +weight: 104 +--- + +{{% note %}} +This example uses [NOAA water sample data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data). +{{% /note %}} + +This example compares the value from the latest point to an average value stored in another bucket. This is useful when using the average value to calculate a [threshold check](/influxdb/v2.5/monitor-alert/checks/create/#threshold-check). + +The following query: + +- Uses [`range()`](/{{< latest "flux" >}}/stdlib/universe/range/) to define a time range. +- Gets the last value in the `means` bucket and compares it to the last value in the `noaa` bucket using [`last()`](/{{< latest "flux" >}}/stdlib/universe/last/). +- Uses [`join()`](/{{< latest "flux" >}}/stdlib/universe/join/) to combine the results +- Uses [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to calculate the differences + +```js +means = from(bucket: "weekly_means") + |> range(start: 2019-09-01T00:00:00Z) + |> last() + |> keep(columns: ["_value", "location"]) + +latest = from(bucket: "noaa") + |> range(start: 2019-09-01T00:00:00Z) + |> filter(fn: (r) => r._measurement == "average_temperature") + |> last() + |> keep(columns: ["_value", "location"]) + +join(tables: {mean: means, reading: latest}, on: ["location"]) + |> map(fn: (r) => ({r with deviation: r._value_reading - r._value_mean})) +``` + +### Example results + +| location | _value_mean | _value_reading | deviation | +|:-------- | -----------: | --------------:| ---------: | +| coyote_creek | 79.82710622710623 | 89 | 9.172893772893772 | +| santa_monica | 80.20451339915374 | 85 | 4.79548660084626 | diff --git a/content/influxdb/v2.5/query-data/common-queries/iot-common-queries.md b/content/influxdb/v2.5/query-data/common-queries/iot-common-queries.md new file mode 100644 index 000000000..75a44758a --- /dev/null +++ b/content/influxdb/v2.5/query-data/common-queries/iot-common-queries.md @@ -0,0 +1,213 @@ +--- +title: IoT sensor common queries +description: > + Use Flux to address common IoT use cases that query data collected from sensors. +influxdb/v2.5/tags: [queries] +menu: + influxdb_2_5: + name: IoT common queries + parent: Common queries +weight: 205 +--- + +The following scenarios illustrate common queries used to extract information from IoT sensor data: + +- [Calculate time in state](#calculate-time-in-state) +- [Calculate time weighted average](#calculate-time-weighted-average) +- [Calculate value between events](#calculate-value-between-events) +- [Determine a state within existing values](#determine-a-state-within-existing-values) + +All scenarios below use the `machineProduction` sample dataset provided by the [InfluxDB `sample` package](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/). 
+For more information, see [Sample data](/influxdb/cloud/reference/sample-data/). + +## Calculate time in state + +In this scenario, we look at whether a production line is running smoothly (`state`=`OK`) and what percentage of time the production line is running smoothly or not (`state`=`NOK`). If no points are recorded during the interval (`state`=`NaN`), you may opt to retrieve the last state prior to the interval. + +To visualize the time in state, see the [Mosaic visualization](#mosaic-visualization). + +**To calculate the percentage of time a machine spends in each state** + +1. Import the [`contrib/tomhollingworth/events` package](/{{< latest "flux" >}}/stdlib/contrib/tomhollingworth/events/). +1. Query the `state` field. +2. Use `events.duration()` to return the amount of time (in a specified unit) between each data point, and store the interval in the `duration` column. +3. Group columns by the status value column (in this case `_value`), `_start`, `_stop`, and other relevant dimensions. +4. Sum the `duration` column to calculate the total amount of time spent in each state. +5. Pivot the summed durations into the `_value` column. +6. Use `map()` to calculate the percentage of time spent in each state. + +```js +import "contrib/tomhollingworth/events" + +from(bucket: "machine") + |> range(start: 2021-08-01T00:00:00Z, stop: 2021-08-02T00:30:00Z) + |> filter(fn: (r) => r["_measurement"] == "machinery") + |> filter(fn: (r) => r["_field"] == "state") + |> events.duration(unit: 1h, columnName: "duration") + |> group(columns: ["_value", "_start", "_stop", "station_id"]) + |> sum(column: "duration") + |> pivot(rowKey: ["_stop"], columnKey: ["_value"], valueColumn: "duration") + |> map( + fn: (r) => { + totalTime = float(v: r.NOK + r.OK) + + return {r with NOK: float(v: r.NOK) / totalTime * 100.0, OK: float(v: r.OK) / totalTime * 100.0} + }, + ) +``` + +The query above focuses on a specific time range of state changes reported in the production line. + +- `range()` defines the time range to query. +- `filter()` defines the field (`state`) and measurement (`machinery`) to filter by. +- `events.duration()` calculates the time between points. +- `group()` regroups the data by the field value, so points with `OK` and `NOK` field values are grouped into separate tables. +- `sum()` returns the sum of durations spent in each state. + +The output of the query at this point is: + +| _value | duration | +| ------ | -------: | +| NOK | 22 | + +| _value | duration | +| ------ | -------: | +| OK | 172 | + +`pivot()` creates columns for each unique value in the `_value` column, and then assigns the associated duration as the column value. +The output of the pivot operation is: + +| NOK | OK | +| :-- | :-- | +| 22 | 172 | + +Given the output above, `map()` does the following: + +1. Adds the `NOK` and `OK` values to calculate `totalTime`. +2. Divides `NOK` by `totalTime`, and then multiplies the quotient by 100. +3. Divides `OK` by `totalTime`, and then multiplies the quotient by 100. + +This returns: + +| NOK | OK | +| :---------------- | :----------------- | +| 11.34020618556701 | 88.65979381443299 | + +The result shows that 88.66% of time production is in the `OK` state, and that 11.34% of time, production is in the `NOK` state. + +#### Mosaic visualization + +The [mosaic visualization](/influxdb/v2.5/visualize-data/visualization-types/mosaic/) displays state changes over time. In this example, the mosaic visualization displays different colored tiles based on the `state` field. 
+ +```js +from(bucket: "machine") + |> range(start: 2021-08-01T00:00:00Z, stop: 2021-08-02T00:30:00Z) + |> filter(fn: (r) => r._measurement == "machinery") + |> filter(fn: (r) => r._field == "state") + |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false) +``` + +When visualizing data, it is possible to have more data points than available pixels. To divide data into time windows that span a single pixel, use `aggregateWindow` with the `every` parameter set to `v.windowPeriod`. +Use `last` as the aggregate `fn` to return the last value in each time window. +Set `createEmpty` to `false` so results won't include empty time windows. + + +## Calculate time weighted average + +To calculate the time-weighted average of data points, use the [`timeWeightedAvg()` function](/{{< latest "flux" >}}/stdlib/universe/timeweightedavg/). + +The example below queries the `oil_temp` field in the `machinery` measurement. The `timeWeightedAvg()` function returns the time-weighted average of oil temperatures based on 5 second intervals. + +```js +from(bucket: "machine") + |> range(start: 2021-08-01T00:00:00Z, stop: 2021-08-01T00:00:30Z) + |> filter(fn: (r) => r._measurement == "machinery" and r._field == "oil_temp") + |> timeWeightedAvg(unit: 5s) +``` + +##### Output data + +| stationID | _start | _stop | _value | +|:----- | ----- | ----- | ------:| +| g1 | 2021-08-01T01:00:00.000Z | 2021-08-01T00:00:30.000Z | 40.25396118491921 | +| g2 | 2021-08-01T01:00:00.000Z | 2021-08-01T00:00:30.000Z | 40.6 | +| g3 | 2021-08-01T01:00:00.000Z | 2021-08-01T00:00:30.000Z | 41.384505595567866 | +| g4 | 2021-08-01T01:00:00.000Z | 2021-08-01T00:00:30.000Z | 41.26735518634935 | + + +## Calculate value between events + +Calculate the value between events by getting the average value during a specific time range. + +The following scenario queries data starting when four production lines start and end. +The following query calculates the average oil temperature for each grinding station during that period. + +```js +batchStart = 2021-08-01T00:00:00Z +batchStop = 2021-08-01T00:00:20Z + +from(bucket: "machine") + |> range(start: batchStart, stop: batchStop) + |> filter(fn: (r) => r._measurement == "machinery" and r._field == "oil_temp") + |> mean() +``` + +##### Output + +| stationID | _start | _stop | _value | +|:----- | ----- | ----- | ------:| +| g1 | 2021-08-01T01:00:00.000Z | 2021-08-02T00:00:00.000Z | 40 | +| g2 | 2021-08-01T01:00:00.000Z | 2021-08-02T00:00:00.000Z | 40.6 | +| g3 | 2021-08-01T01:00:00.000Z | 2021-08-02T00:00:00.000Z | 41.379999999999995 | +| g4 | 2021-08-01T01:00:00.000Z | 2021-08-02T00:00:00.000Z | 41.2 | + + +## Determine a state with existing values + +Use multiple existing values to determine a state. +The following example calculates a state based on the difference between the `pressure` and `pressure-target` fields in the machine-production sample data. +To determine a state by comparing existing fields: + +1. Query the fields to compare (in this case, `pressure` and `pressure_target`). +2. (Optional) Use `aggregateWindow()` to window data into time-based windows and + apply an aggregate function (like `mean()`) to return values that represent larger windows of time. +3. Use `pivot()` to shift field values into columns. +4. Use `map()` to compare or operate on the different field column values. +5. Use `map()` to assign a status (in this case, `needsMaintenance` based on the relationship of the field column values. 
+ +```js +import "math" + +from(bucket: "machine") + |> range(start: 2021-08-01T00:00:00Z, stop: 2021-08-02T00:00:00Z) + |> filter(fn: (r) => r["_measurement"] == "machinery") + |> filter(fn: (r) => r["_field"] == "pressure" or r["_field"] == "pressure_target") + |> aggregateWindow(every: 12h, fn: mean) + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map(fn: (r) => ({ r with pressureDiff: r.pressure - r.pressure_target })) + |> map(fn: (r) => ({ r with needsMaintenance: if math.abs(x: r.pressureDiff) >= 15.0 then true else false })) +``` + +##### Output + +| _time | needsMaintenance | pressure | pressure_target | pressureDiff | stationID | +| :----------------------- | :--------------- | -----------------: | -----------------: | ------------------: | --------: | +| 2021-08-01T12:00:00.000Z | false | 101.83929080014092 | 104.37786394078252 | -2.5385731406416028 | g1 | +| 2021-08-02T00:00:00.000Z | false | 96.04368008245874 | 102.27698650674662 | -6.233306424287889 | g1 | + +| _time | needsMaintenance | pressure | pressure_target | pressureDiff | stationID | +| :----------------------- | :--------------- | -----------------: | -----------------: | ------------------: | --------: | +| 2021-08-01T12:00:00.000Z | false | 101.62490431541765 | 104.83915260886623 | -3.214248293448577 | g2 | +| 2021-08-02T00:00:00.000Z | false | 94.52039415465273 | 105.90869375273046 | -11.388299598077722 | g2 | + +| _time | needsMaintenance | pressure | pressure_target | pressureDiff | stationID | +| :----------------------- | :--------------- | -----------------: | -----------------: | ------------------: | --------: | +| 2021-08-01T12:00:00.000Z | false | 92.23774168403503 | 104.81867444768653 | -12.580932763651504 | g3 | +| 2021-08-02T00:00:00.000Z | true | 89.20867846153847 | 108.2579185520362 | -19.049240090497733 | g3 | + +| _time | needsMaintenance | pressure | pressure_target | pressureDiff | stationID | +| :----------------------- | :--------------- | -----------------: | -----------------: | ------------------: | --------: | +| 2021-08-01T12:00:00.000Z | false | 94.40834093349847 | 107.6827757125155 | -13.274434779017028 | g4 | +| 2021-08-02T00:00:00.000Z | true | 88.61785638936534 | 108.25471698113208 | -19.636860591766734 | g4 | + +The table reveals that the `pressureDiff` value `-19.636860591766734` from station g4 and `-19.049240090497733` from station g3 are higher than 15, therefore there is a change in state that marks the `needMaintenance` value as "true" and would require that station to need work to turn that value back to `false`. \ No newline at end of file diff --git a/content/influxdb/v2.5/query-data/common-queries/multiple-fields-in-calculations.md b/content/influxdb/v2.5/query-data/common-queries/multiple-fields-in-calculations.md new file mode 100644 index 000000000..ecfbc2a9a --- /dev/null +++ b/content/influxdb/v2.5/query-data/common-queries/multiple-fields-in-calculations.md @@ -0,0 +1,106 @@ +--- +title: Use multiple fields in a calculation +description: > + Query multiple fields, pivot results, and use multiple field values to + calculate new values in query results. +influxdb/v2.5/tags: [queries] +menu: + influxdb_2_5: + parent: Common queries +weight: 103 +--- + +To use values from multiple fields in a mathematic calculation, complete the following steps: + +1. [Filter by fields required in your calculation](#filter-by-fields) +2. [Pivot fields into columns](#pivot-fields-into-columns) +3. 
[Perform the mathematic calculation](#perform-the-calculation) + +## Filter by fields +Use [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) +to return only the fields necessary for your calculation. +Use the [`or` logical operator](/{{< latest "flux" >}}/spec/operators/#logical-operators) +to filter by multiple fields. + +The following example queries two fields, `A` and `B`: + +```js +from(bucket: "example-bucket") + |> range(start: -1m) + |> filter(fn: (r) => r._field == "A" or r._field == "B") +``` + +This query returns one or more tables for each field. For example: + +{{< flex >}} +{{% flex-content %}} +| _time | _field | _value | +|:----- |:------:| ------:| +| 2021-01-01T00:00:00Z | A | 12.4 | +| 2021-01-01T00:00:15Z | A | 12.2 | +| 2021-01-01T00:00:30Z | A | 11.6 | +| 2021-01-01T00:00:45Z | A | 11.9 | +{{% /flex-content %}} +{{% flex-content %}} +| _time | _field | _value | +|:----- |:------:| ------:| +| 2021-01-01T00:00:00Z | B | 3.1 | +| 2021-01-01T00:00:15Z | B | 4.8 | +| 2021-01-01T00:00:30Z | B | 2.2 | +| 2021-01-01T00:00:45Z | B | 3.3 | +{{% /flex-content %}} +{{< /flex >}} + +## Pivot fields into columns +Use [`pivot()`](/{{< latest "flux" >}}/stdlib/universe/pivot/) +to align multiple fields by time. + +{{% note %}} +To correctly pivot on `_time`, points for each field must have identical timestamps. +If timestamps are irregular or do not align perfectly, see +[Normalize irregular timestamps](/influxdb/v2.5/query-data/flux/manipulate-timestamps/#normalize-irregular-timestamps). +{{% /note %}} + +```js +// ... + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") +``` + +Using the queried data [above](#filter-by-fields), this `pivot()` function returns: + +| _time | A | B | +|:----- | ------:| ------:| +| 2021-01-01T00:00:00Z | 12.4 | 3.1 | +| 2021-01-01T00:00:15Z | 12.2 | 4.8 | +| 2021-01-01T00:00:30Z | 11.6 | 2.2 | +| 2021-01-01T00:00:45Z | 11.9 | 3.3 | + +## Perform the calculation +Use [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) +to perform the mathematic operation using column values as operands. + +The following example uses values in the `A` and `B` columns to calculate a new `_value` column: + +```js +// ... + |> map(fn: (r) => ({ r with _value: r.A * r.B })) +``` + +Using the pivoted data above, this `map()` function returns: + +| _time | A | B | _value | +|:----- | ------:| ------:| ------:| +| 2021-01-01T00:00:00Z | 12.4 | 3.1 | 38.44 | +| 2021-01-01T00:00:15Z | 12.2 | 4.8 | 58.56 | +| 2021-01-01T00:00:30Z | 11.6 | 2.2 | 25.52 | +| 2021-01-01T00:00:45Z | 11.9 | 3.3 | 39.27 | + +## Full example query + +```js +from(bucket: "example-bucket") + |> range(start: -1m) + |> filter(fn: (r) => r._field == "A" or r._field == "B") + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map(fn: (r) => ({r with _value: r.A * r.B})) +``` diff --git a/content/influxdb/v2.5/query-data/common-queries/operate-on-columns.md b/content/influxdb/v2.5/query-data/common-queries/operate-on-columns.md new file mode 100644 index 000000000..54b1a6244 --- /dev/null +++ b/content/influxdb/v2.5/query-data/common-queries/operate-on-columns.md @@ -0,0 +1,137 @@ +--- +title: Operate on columns +description: > + Find and count unique values, recalculate the `_value` column, and use values to calculate a new column. 
+influxdb/v2.5/tags: [queries] +aliases: + - /influxdb/v2.5/query-data/common-queries/count_unique_values_for_column/ + - /influxdb/v2.5/query-data/common-queries/recalculate_value_column/ + - /influxdb/v2.5/query-data/common-queries/calculate_new_column/ +menu: + influxdb_2_5: + name: Operate on columns + parent: Common queries +weight: 100 +--- + +Use the following common queries to operate on columns: + +- [Find and count unique values in a column](#find-and-count-unique-values-in-a-column) +- [Recalculate the _values column](#recalculate-the-_values-column) +- [Calculate a new column](#calculate-a-new-column) + +{{% note %}} +These examples use [NOAA water sample data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data). +{{% /note %}} + +## Find and count unique values in a column + +Find and count the number of unique values in a specified column. +The following examples find and count unique locations where data was collected. + +### Find unique values + +This query: + + - Uses [`group()`](/{{< latest "flux" >}}/stdlib/universe/group/) to ungroup data and return results in a single table. + - Uses [`keep()`](/{{< latest "flux" >}}/stdlib/universe/keep/) and [`unique()`](/{{< latest "flux" >}}/stdlib/universe/selectors/unique/) to return unique values in the specified column. + +```js +from(bucket: "noaa") + |> range(start: -30d) + |> group() + |> keep(columns: ["location"]) + |> unique(column: "location") +``` + +#### Example results +| location | +|:-------- | +| coyote_creek | +| santa_monica | + +### Count unique values + +This query: + +- Uses [`group()`](/{{< latest "flux" >}}/stdlib/universe/group/) to ungroup data and return results in a single table. +- Uses [`keep()`](/{{< latest "flux" >}}/stdlib/universe/keep/), [`unique()`](/{{< latest "flux" >}}/stdlib/universe/unique/), and then [`count()`](/{{< latest "flux" >}}/stdlib/universe/count/) to count the number of unique values. + +```js +from(bucket: "noaa") + |> group() + |> unique(column: "location") + |> count(column: "location") +``` + +#### Example results + +| location | +| ---------:| +| 2 | + + +## Recalculate the _values column + +To recalculate the `_value` column, use the `with` operator in [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to overwrite the existing `_value` column. + +The following query: + + - Uses [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) to filter the `average_temperature` measurement. + - Uses [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to convert Fahrenheit temperature values into Celsius. 
+ +```js + +from(bucket: "noaa") + |> filter(fn: (r) => r._measurement == "average_temperature") + |> range(start: -30d) + |> map(fn: (r) => ({r with _value: (float(v: r._value) - 32.0) * 5.0 / 9.0} )) +``` + +| _field | _measurement | _start | _stop | _time | location | _value | +|:------ |:------------ |:------ |:----- |:----- |:-------- | ------: | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:00:00Z | coyote_creek | 27.77777777777778 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:06:00Z | coyote_creek | 22.77777777777778 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:12:00Z | coyote_creek | 30 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:18:00Z | coyote_creek | 31.666666666666668 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:24:00Z | coyote_creek | 25 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:30:00Z | coyote_creek | 21.11111111111111 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:36:00Z | coyote_creek | 28.88888888888889 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:42:00Z | coyote_creek | 24.444444444444443 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:48:00Z | coyote_creek | 29.444444444444443 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T00:54:00Z | coyote_creek | 26.666666666666668 | +| degrees | average_temperature | 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | 2019-08-17T01:00:00Z | coyote_creek | 21.11111111111111 | +| ••• | ••• | ••• | ••• | ••• | ••• | ••• | + +## Calculate a new column + +To use values in a row to calculate and add a new column, use `map()`. +This example below converts temperature from Fahrenheit to Celsius and maps the Celsius value to a new `celsius` column. + +The following query: + + - Uses [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) to filter the `average_temperature` measurement. + - Uses [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to create a new column calculated from existing values in each row. 
+ +```js +from(bucket: "noaa") + |> filter(fn: (r) => r._measurement == "average_temperature") + |> range(start: -30d) + |> map(fn: (r) => ({r with celsius: (r._value - 32.0) * 5.0 / 9.0})) +``` + +#### Example results + +| _start | _stop | _field | _measurement | location | _time | _value | celsius | +|:------ |:----- |:------: |:------------: |:--------: |:----- | ------:| -------:| +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:00:00Z | 82 | 27.78 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:06:00Z | 73 | 22.78 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:12:00Z | 86 | 30.00 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:18:00Z | 89 | 31.67 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:24:00Z | 77 | 25.00 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:30:00Z | 70 | 21.11 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:36:00Z | 84 | 28.89 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:42:00Z | 76 | 24.44 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:48:00Z | 85 | 29.44 | +| 1920-03-05T22:10:01Z | 2020-03-05T22:10:01Z | degrees | average_temperature | coyote_creek | 2019-08-17T00:54:00Z | 80 | 26.67 | +| ••• | ••• | ••• | ••• | ••• | ••• | ••• | ••• | diff --git a/content/influxdb/v2.5/query-data/execute-queries/_index.md b/content/influxdb/v2.5/query-data/execute-queries/_index.md new file mode 100644 index 000000000..250cd1bbb --- /dev/null +++ b/content/influxdb/v2.5/query-data/execute-queries/_index.md @@ -0,0 +1,17 @@ +--- +title: Execute queries +seotitle: Different ways to query InfluxDB +description: There are multiple ways to query data from InfluxDB including the InfluxDB UI, CLI, and API. +weight: 103 +menu: + influxdb_2_5: + name: Execute queries + parent: Query data +influxdb/v2.5/tags: [query] +--- + +There are multiple ways to execute queries with InfluxDB. Choose from the following options: + +{{< children >}} + + diff --git a/content/influxdb/v2.5/query-data/execute-queries/data-explorer.md b/content/influxdb/v2.5/query-data/execute-queries/data-explorer.md new file mode 100644 index 000000000..05bdd555e --- /dev/null +++ b/content/influxdb/v2.5/query-data/execute-queries/data-explorer.md @@ -0,0 +1,101 @@ +--- +title: Query in Data Explorer +description: > + Query InfluxDB using the InfluxDB user interface (UI) Data Explorer. Discover how to query data in InfluxDB 2.1 using the InfluxDB UI. +aliases: + - /influxdb/v2.5/visualize-data/explore-metrics/ +weight: 201 +menu: + influxdb_2_5: + name: Query with Data Explorer + parent: Execute queries +influxdb/v2.5/tags: [query] +--- + +Build, execute, and visualize your queries in InfluxDB UI's **Data Explorer**. + +![Data Explorer with Flux](/img/influxdb/2-0-data-explorer.png) + +Move seamlessly between using the Flux builder or templates and manually editing the query. +Choose between [visualization types](/influxdb/v2.5/visualize-data/visualization-types/) for your query. 
+ +## Query data with Flux and the Data Explorer + +Flux is a functional data scripting language designed for querying, +analyzing, and acting on time series data. +See [Get started with Flux](/influxdb/v2.5/query-data/get-started) to learn more about Flux. + +1. In the navigation menu on the left, click **Data Explorer**. + + {{< nav-icon "data-explorer" >}} + +2. Use the Flux builder in the bottom panel to create a Flux query: + - Select a bucket to define your data source or select `+ Create Bucket` to add a new bucket. + - Edit your time range with the [time range option](#select-time-range) in the dropdown menu. + - Add filters to narrow your data by selecting attributes or columns in the dropdown menu. + - Select **Group** from the **Filter** dropdown menu to group data into tables. For more about how grouping data in Flux works, see [Group data](/influxdb/v2.5/query-data/flux/group-data/). +3. Alternatively, click **Script Editor** to manually edit the query. + To switch back to the query builder, click **Query Builder**. Note that your updates from the Script Editor will not be saved. +4. Use the **Functions** list to review the available Flux functions. + Click a function from the list to add it to your query. +5. Click **Submit** (or press `Control+Enter`) to run your query. You can then preview your graph in the above pane. + To cancel your query while it's running, click **Cancel**. +6. To work on multiple queries at once, click the {{< icon "plus" >}} to add another tab. + - Click the eye icon on a tab to hide or show a query's visualization. + - Click the name of the query in the tab to rename it. + +## Visualize your query + +- Select an available [visualization types](/influxdb/v2.5/visualize-data/visualization-types/) from the dropdown menu in the upper-left: + + {{< img-hd src="/img/influxdb/2-0-visualizations-dropdown.png" title="Visualization dropdown" />}} + +## Control your dashboard cell + +To open the cell editor overlay, click the gear icon in the upper right of a cell and select **Configure**. + The cell editor overlay opens. + +### View raw data + +Toggle the **View Raw Data** {{< icon "toggle" >}} option to see your data in table format instead of a graph. Scroll through raw data using arrows, or click page numbers to find specific tables. [Group keys](/influxdb/cloud/reference/glossary/#group-key) and [data types](/influxdb/cloud/reference/glossary/#data-type) are easily identifiable at the top of each column underneath the headings. Use this option when data can't be visualized using a visualization type. + + {{< img-hd src="/img/influxdb/cloud-controls-view-raw-data.png" alt="View raw data" />}} + +### Save as CSV + +Click the CSV icon to save the cells contents as a CSV file. + +### Manually refresh dashboard + +Click the refresh button ({{< icon "refresh" >}}) to manually refresh the dashboard's data. + +### Select time range + +1. Select from the time range options in the dropdown menu. + + {{< img-hd src="/img/influxdb/2-0-controls-time-range.png" alt="Select time range" />}} + +2. Select **Custom Time Range** to enter a custom time range with precision up to nanoseconds. +The default time range is 5m. + +> The custom time range uses the selected timezone (local time or UTC). + +### Query Builder or Script Editor + +Click **Query Builder** to use the builder to create a Flux query. Click **Script Editor** to manually edit the query. 
+ +#### Keyboard shortcuts + +In **Script Editor** mode, the following keyboard shortcuts are available: + +| Key | Description | +|--------------------------------|---------------------------------------------| +| `Control + /` (`⌘ + /` on Mac) | Comment/uncomment current or selected lines | +| `Control + Enter` | Submit query | + +## Save your query as a dashboard cell or task + +- Click **Save as** in the upper right, and then: + - To add your query to a dashboard, click **Dashboard Cell**. + - To save your query as a task, click **Task**. + - To save your query as a variable, click **Variable**. diff --git a/content/influxdb/v2.5/query-data/execute-queries/flux-repl.md b/content/influxdb/v2.5/query-data/execute-queries/flux-repl.md new file mode 100644 index 000000000..d405df5bd --- /dev/null +++ b/content/influxdb/v2.5/query-data/execute-queries/flux-repl.md @@ -0,0 +1,19 @@ +--- +title: Query in the Flux REPL +description: Query InfluxDB using the Flux REPL. Discover how to query data in InfluxDB 2.5 using the Flux REPL. +weight: 203 +menu: + influxdb_2_5: + name: Query in the Flux REPL + parent: Execute queries +influxdb/v2.5/tags: [query] +--- + +The [Flux REPL](/influxdb/v2.5/tools/flux-repl/) starts an interactive +Read-Eval-Print Loop (REPL) where you can write and execute Flux queries. + +```sh +./flux repl +``` + +For more information, see [Use the Flux REPL](/influxdb/v2.5/tools/flux-repl/). diff --git a/content/influxdb/v2.5/query-data/execute-queries/influx-api.md b/content/influxdb/v2.5/query-data/execute-queries/influx-api.md new file mode 100644 index 000000000..b2852c591 --- /dev/null +++ b/content/influxdb/v2.5/query-data/execute-queries/influx-api.md @@ -0,0 +1,111 @@ +--- +title: Query with the InfluxDB API +description: Query InfluxDB with the InfluxDB API. Discover how to query data in InfluxDB 2.1 using the InfluxDB API. +weight: 202 +menu: + influxdb_2_5: + name: Query with the InfluxDB API + parent: Execute queries +influxdb/v2.5/tags: [query] +--- + +The [InfluxDB v2 API](/influxdb/v2.5/reference/api) provides a programmatic interface for all interactions with InfluxDB. +To query InfluxDB {{< current-version >}}, do one of the following: + +- Send a Flux query request to the [`/api/v2/query`](/influxdb/v2.5/api/#operation/PostQueryAnalyze) endpoint. +- Send an InfluxQL query request to the [/query 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/query/). + +In your request, set the following: + +- Your organization via the `org` or `orgID` URL parameters. +- `Authorization` header to `Token ` + your API token. +- `Accept` header to `application/csv`. +- `Content-type` header to `application/vnd.flux` (Flux only) or `application/json` (Flux or InfluxQL). +- Query in Flux or InfluxQL with the request's raw data. + +{{% note %}} +#### Use gzip to compress the query response + +To compress the query response, set the `Accept-Encoding` header to `gzip`. +This saves network bandwidth, but increases server-side load. + +We recommend only using gzip compression on responses that are larger than 1.4 KB. +If the response is smaller than 1.4 KB, gzip encoding will always return a 1.4 KB +response, despite the uncompressed response size. +1500 bytes (~1.4 KB) is the maximum transmission unit (MTU) size for the public +network and is the largest packet size allowed at the network layer. 
+{{% /note %}} + +#### Flux - Example query request + +Below is an example `curl` request that sends a Flux query to InfluxDB {{< current-version >}}: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Without compression](#) +[With compression](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +curl --request POST \ + http://localhost:8086/api/v2/query?orgID=INFLUX_ORG_ID \ + --header 'Authorization: Token INFLUX_TOKEN' \ + --header 'Accept: application/csv' \ + --header 'Content-type: application/vnd.flux' \ + --data 'from(bucket:"example-bucket") + |> range(start: -12h) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> aggregateWindow(every: 1h, fn: mean)' +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```bash +curl --request POST \ + http://localhost:8086/api/v2/query?orgID=INFLUX_ORG_ID \ + --header 'Authorization: Token INFLUX_TOKEN' \ + --header 'Accept: application/csv' \ + --header 'Content-type: application/vnd.flux' \ + --header 'Accept-Encoding: gzip' \ + --data 'from(bucket:"example-bucket") + |> range(start: -12h) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> aggregateWindow(every: 1h, fn: mean)' +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +#### InfluxQL - Example query request + +Below is an example `curl` request that sends an InfluxQL query to InfluxDB {{< current-version >}}: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Without compression](#) +[With compression](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +curl --request -G http://localhost:8086/query?orgID=INFLUX_ORG_ID&database=MyDB&retention_policy=MyRP \ + --header 'Authorization: Token INFLUX_TOKEN' \ + --header 'Accept: application/csv' \ + --header 'Content-type: application/json' \ + --data-urlencode "q=SELECT used_percent FROM example-db.example-rp.example-measurement WHERE host=host1" +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```bash +curl --request -G http://localhost:8086/query?orgID=INFLUX_ORG_ID&database=MyDB&retention_policy=MyRP \ + --header 'Authorization: Token INFLUX_TOKEN' \ + --header 'Accept: application/csv' \ + --header 'Content-type: application/json' \ + --header 'Accept-Encoding: gzip' \ + --data-urlencode "q=SELECT used_percent FROM example-db.example-rp.example-measurement WHERE host=host1" +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +InfluxDB returns the query results in [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/). diff --git a/content/influxdb/v2.5/query-data/execute-queries/influx-query.md b/content/influxdb/v2.5/query-data/execute-queries/influx-query.md new file mode 100644 index 000000000..a796e7677 --- /dev/null +++ b/content/influxdb/v2.5/query-data/execute-queries/influx-query.md @@ -0,0 +1,41 @@ +--- +title: Use the influx query command +description: Query InfluxDB using the influx CLI. Discover how to query data in InfluxDB 2.1 using `influx query`. +weight: 204 +menu: + influxdb_2_5: + name: Use the influx CLI + parent: Execute queries +influxdb/v2.5/tags: [query] +related: + - /influxdb/v2.5/reference/cli/influx/query/ +--- + +Use the [`influx query` command](/influxdb/v2.5/reference/cli/influx/query) to query data in InfluxDB using Flux. +Pass Flux queries to the command as either a file or via stdin. 
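+ +In both cases, the query itself is an ordinary Flux script. For example, a minimal query file (the bucket and measurement names below are placeholders) might contain: + +```js +// query.flux (hypothetical example) +from(bucket: "example-bucket") + |> range(start: -10m) + |> filter(fn: (r) => r._measurement == "example-measurement") +```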
+ +###### Run a query from a file + +```bash +influx query --file /path/to/query.flux +``` + +###### Pass raw Flux via stdin pipe + +```bash +influx query - # Press Return to open the pipe + +data = from(bucket: "example-bucket") |> range(start: -10m) # ... +# ctrl-d to close the pipe and submit the query +``` + +{{% note %}} +#### Remove unnecessary columns in large datasets +When using the `influx query` command to query and download large datasets, +drop columns such as `_start` and `_stop` to optimize the download file size. + +```js +// ... + |> drop(columns: ["_start", "_stop"]) +``` +{{% /note %}} diff --git a/content/influxdb/v2.5/query-data/execute-queries/query-sample-data.md b/content/influxdb/v2.5/query-data/execute-queries/query-sample-data.md new file mode 100644 index 000000000..50d0f1714 --- /dev/null +++ b/content/influxdb/v2.5/query-data/execute-queries/query-sample-data.md @@ -0,0 +1,88 @@ +--- +title: Query sample data +description: > + Explore InfluxDB OSS with our sample data buckets. +menu: + influxdb_2_5: + name: Query with sample data + parent: Execute queries +weight: 10 +--- + +Use **InfluxDB OSS** sample datasets to quickly access data that lets you explore and familiarize yourself with InfluxDB without requiring you to have or write your own data. + +- [Choose sample data](#choose-sample-data) +- [Explore sample data](#explore-sample-data) +- [Create sample data dashboards](#create-sample-data-dashboards) + +{{% note %}} +#### Network bandwidth + +Each execution of `sample.data()` downloads the specified dataset from **Amazon S3**. +If using [InfluxDB Cloud](/influxdb/cloud/) or a hosted InfluxDB OSS instance, +you may see additional network bandwidth costs when using this function. +Approximate sample dataset sizes are listed for each [sample dataset](/influxdb/v2.5/reference/sample-data/#sample-datasets) and in the output of [`sample.list()`](/influxdb/v2.5/reference/flux/stdlib/influxdb-sample/list/). + +{{% /note %}} + +## Choose sample data + +1. Choose from the following sample datasets: + - **Air sensor sample data**: Explore, visualize, and monitor humidity, temperature, and carbon monoxide levels in the air. + - **Bird migration sample data**: Explore, visualize, and monitor the latitude and longitude of bird migration patterns. + - **NOAA NDBC sample data**: Explore, visualize, and monitor NDBC's observations from their buoys. This data includes air temperature, wind speed, and more from specific locations. + - **NOAA water sample data**: Explore, visualize, and monitor temperature, water level, pH, and quality from specific locations. + - **USGS Earthquake data**: Explore, visualize, and monitor earthquake monitoring data. This data includes alerts, cdi, quarry blast, magnitude, and more. +2. Do one of the following to download sample data: + - [Add sample data with community template](#add-sample-data-with-community-template) + - [Add sample data using the InfluxDB UI](#add-sample-data) + +### Add sample data with community template + +1. Visit the **InfluxDB templates page** in the InfluxDB OSS UI. Click **Settings** > **Templates** in the navigation menu on the left. + + {{< nav-icon "settings" >}} + +2. Paste the Sample Data community template URL in the **resource manifest file** field: + + ``` + https://github.com/influxdata/community-templates/blob/master/sample-data/sample-data.yml + ``` + +## Explore sample data +Use the [Data Explorer](/influxdb/cloud/visualize-data/explore-metrics/) +to query and visualize data in sample data buckets.
+ +In the navigation menu on the left, click **Data Explorer**. + +{{< nav-icon "explore" >}} + +### Add sample data + +1. In the navigation menu on the left, click **Data (Load Data)** > **Buckets**. + + {{< nav-icon "data" >}} + +2. Click **{{< icon "plus" >}} Create bucket**, and then name your bucket. The bucket will appear in your list of buckets. +3. View the [sample datasets document](/influxdb/cloud/reference/sample-data/#sample-datasets) and choose a sample data to query. +4. Copy the `sample.data()` function listed underneath. +5. Click **Explore** on the left navigation of InfluxDB Cloud and click your bucket, and then click **Script Editor**. +6. Paste the `sample.data()` function. +7. Click **Submit** to run the query. + +For more information about querying in the Script Editor, see how to [Query data with Flux and the Data Explorer](/influxdb/cloud/query-data/execute-queries/data-explorer/#query-data-with-flux-and-the-data-explorer). + +## Create sample data dashboards + +After adding a sample data bucket, create a dashboard specific to the sample dataset: + +1. Click **Boards (Dashboards)** in the navigation menu on the left. + + {{< nav-icon "dashboards" >}} + +2. Click **Create Dashboard > New Dashboard**, and name the dashboard after your bucket. +3. Click **Add Cell**, and select your sample data bucket. +4. Click **Script Editor**. +5. Copy and paste the `sample.data()` function into the script editor. +6. Click **Submit** to run the query. +6. Define the variables of your sample data. diff --git a/content/influxdb/v2.5/query-data/flux/_index.md b/content/influxdb/v2.5/query-data/flux/_index.md new file mode 100644 index 000000000..0e8b7aa96 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/_index.md @@ -0,0 +1,35 @@ +--- +title: Query data with Flux +description: Guides that walk through both common and complex queries and use cases for Flux. +weight: 102 +influxdb/v2.5/tags: [flux, query] +menu: + influxdb_2_5: + name: Query with Flux + parent: Query data +alias: + - /influxdb/v2.5/query-data/guides/ +--- + +The following guides walk through both common and complex queries and use cases for Flux. + +{{% note %}} +#### Example data variable +Many of the examples provided in the following guides use a `data` variable, +which represents a basic query that filters data by measurement and field. +`data` is defined as: + +```js +data = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement" and r._field == "example-field") +``` +{{% /note %}} + +## Flux query guides + +{{< children type="anchored-list" pages="all" >}} + +--- + +{{< children pages="all" readmore=true hr=true >}} diff --git a/content/influxdb/v2.5/query-data/flux/calculate-percentages.md b/content/influxdb/v2.5/query-data/flux/calculate-percentages.md new file mode 100644 index 000000000..1d39b7843 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/calculate-percentages.md @@ -0,0 +1,211 @@ +--- +title: Calculate percentages with Flux +list_title: Calculate percentages +description: > + Use [`pivot()` or `join()`](/influxdb/v2.5/query-data/flux/mathematic-operations/#pivot-vs-join) + and the `map()` function to align operand values into rows and calculate a percentage. 
+menu: + influxdb_2_5: + name: Calculate percentages + parent: Query with Flux +weight: 209 +aliases: + - /influxdb/v2.5/query-data/guides/calculate-percentages/ +related: + - /influxdb/v2.5/query-data/flux/mathematic-operations + - /{{< latest "flux" >}}/stdlib/universe/map + - /{{< latest "flux" >}}/stdlib/universe/pivot + - /{{< latest "flux" >}}/stdlib/universe/join +list_query_example: percentages +--- + +Calculating percentages from queried data is a common use case for time series data. +To calculate a percentage in Flux, operands must be in each row. +Use `map()` to re-map values in the row and calculate a percentage. + +**To calculate percentages** + +1. Use [`from()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from/), + [`range()`](/{{< latest "flux" >}}/stdlib/universe/range/) and + [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) to query operands. +2. Use [`pivot()` or `join()`](/influxdb/v2.5/query-data/flux/mathematic-operations/#pivot-vs-join) + to align operand values into rows. +3. Use [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) + to divide the numerator operand value by the denominator operand value and multiply by 100. + +{{% note %}} +The following examples use `pivot()` to align operands into rows because +`pivot()` works in most cases and is more performant than `join()`. +_See [Pivot vs join](/influxdb/v2.5/query-data/flux/mathematic-operations/#pivot-vs-join)._ +{{% /note %}} + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "m1" and r._field =~ /field[1-2]/ ) + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map(fn: (r) => ({ r with _value: r.field1 / r.field2 * 100.0 })) +``` + +## GPU monitoring example +The following example queries data from the gpu-monitor bucket and calculates the +percentage of GPU memory used over time. +Data includes the following: + +- **`gpu` measurement** +- **`mem_used` field**: used GPU memory in bytes +- **`mem_total` field**: total GPU memory in bytes + +### Query mem_used and mem_total fields +```js +from(bucket: "gpu-monitor") + |> range(start: 2020-01-01T00:00:00Z) + |> filter(fn: (r) => r._measurement == "gpu" and r._field =~ /mem_/) +``` + +###### Returns the following stream of tables: + +| _time | _measurement | _field | _value | +|:----- |:------------:|:------: | ------: | +| 2020-01-01T00:00:00Z | gpu | mem_used | 2517924577 | +| 2020-01-01T00:00:10Z | gpu | mem_used | 2695091978 | +| 2020-01-01T00:00:20Z | gpu | mem_used | 2576980377 | +| 2020-01-01T00:00:30Z | gpu | mem_used | 3006477107 | +| 2020-01-01T00:00:40Z | gpu | mem_used | 3543348019 | +| 2020-01-01T00:00:50Z | gpu | mem_used | 4402341478 | + +

+ +| _time | _measurement | _field | _value | +|:----- |:------------:|:------: | ------: | +| 2020-01-01T00:00:00Z | gpu | mem_total | 8589934592 | +| 2020-01-01T00:00:10Z | gpu | mem_total | 8589934592 | +| 2020-01-01T00:00:20Z | gpu | mem_total | 8589934592 | +| 2020-01-01T00:00:30Z | gpu | mem_total | 8589934592 | +| 2020-01-01T00:00:40Z | gpu | mem_total | 8589934592 | +| 2020-01-01T00:00:50Z | gpu | mem_total | 8589934592 | + +### Pivot fields into columns +Use `pivot()` to pivot the `mem_used` and `mem_total` fields into columns. +Output includes `mem_used` and `mem_total` columns with values for each corresponding `_time`. + +```js +// ... + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") +``` + +###### Returns the following: + +| _time | _measurement | mem_used | mem_total | +|:----- |:------------:| --------: | ---------: | +| 2020-01-01T00:00:00Z | gpu | 2517924577 | 8589934592 | +| 2020-01-01T00:00:10Z | gpu | 2695091978 | 8589934592 | +| 2020-01-01T00:00:20Z | gpu | 2576980377 | 8589934592 | +| 2020-01-01T00:00:30Z | gpu | 3006477107 | 8589934592 | +| 2020-01-01T00:00:40Z | gpu | 3543348019 | 8589934592 | +| 2020-01-01T00:00:50Z | gpu | 4402341478 | 8589934592 | + +### Map new values +Each row now contains the values necessary to calculate a percentage. +Use `map()` to re-map values in each row. +Divide `mem_used` by `mem_total` and multiply by 100 to return the percentage. + +{{% note %}} +To return a precise float percentage value that includes decimal points, the example +below casts integer field values to floats and multiplies by a float value (`100.0`). +{{% /note %}} + +```js +// ... + |> map( + fn: (r) => ({ + _time: r._time, + _measurement: r._measurement, + _field: "mem_used_percent", + _value: float(v: r.mem_used) / float(v: r.mem_total) * 100.0 + }), + ) +``` +##### Query results: + +| _time | _measurement | _field | _value | +|:----- |:------------:|:------: | ------: | +| 2020-01-01T00:00:00Z | gpu | mem_used_percent | 29.31 | +| 2020-01-01T00:00:10Z | gpu | mem_used_percent | 31.37 | +| 2020-01-01T00:00:20Z | gpu | mem_used_percent | 30.00 | +| 2020-01-01T00:00:30Z | gpu | mem_used_percent | 35.00 | +| 2020-01-01T00:00:40Z | gpu | mem_used_percent | 41.25 | +| 2020-01-01T00:00:50Z | gpu | mem_used_percent | 51.25 | + +### Full query +```js +from(bucket: "gpu-monitor") + |> range(start: 2020-01-01T00:00:00Z) + |> filter(fn: (r) => r._measurement == "gpu" and r._field =~ /mem_/ ) + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map( + fn: (r) => ({ + _time: r._time, + _measurement: r._measurement, + _field: "mem_used_percent", + _value: float(v: r.mem_used) / float(v: r.mem_total) * 100.0 + }), + ) +``` + +## Examples + +#### Calculate percentages using multiple fields +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "used_system" or r._field == "used_user" or r._field == "total") + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map( + fn: (r) => ({ + r with _value: float(v: r.used_system + r.used_user) / float(v: r.total) * 100.0 + }), + ) +``` + +#### Calculate percentages using multiple measurements + +1. Ensure measurements are in the same [bucket](/influxdb/v2.5/reference/glossary/#bucket). +2. Use `filter()` to include data from both measurements. +3. Use `group()` to ungroup data and return a single table. +4. Use `pivot()` to pivot fields into columns. +5. 
Use `map()` to re-map rows and perform the percentage calculation. + + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => (r._measurement == "m1" or r._measurement == "m2") and (r._field == "field1" or r._field == "field2")) + |> group() + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map(fn: (r) => ({r with _value: r.field1 / r.field2 * 100.0})) +``` + +#### Calculate percentages using multiple data sources +```js +import "sql" +import "influxdata/influxdb/secrets" + +pgUser = secrets.get(key: "POSTGRES_USER") +pgPass = secrets.get(key: "POSTGRES_PASSWORD") +pgHost = secrets.get(key: "POSTGRES_HOST") + +t1 = sql.from( + driverName: "postgres", + dataSourceName: "postgresql://${pgUser}:${pgPass}@${pgHost}", + query: "SELECT id, name, available FROM example_table", +) + +t2 = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement" and r._field == "example-field") + +join(tables: {t1: t1, t2: t2}, on: ["id"]) + |> map(fn: (r) => ({r with _value: r._value_t2 / r.available_t1 * 100.0})) +``` diff --git a/content/influxdb/v2.5/query-data/flux/conditional-logic.md b/content/influxdb/v2.5/query-data/flux/conditional-logic.md new file mode 100644 index 000000000..ff9146fd6 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/conditional-logic.md @@ -0,0 +1,224 @@ +--- +title: Query using conditional logic +seotitle: Query using conditional logic in Flux +list_title: Conditional logic +description: > + This guide describes how to use Flux conditional expressions, such as `if`, + `else`, and `then`, to query and transform data. **Flux evaluates statements from left to right and stops evaluating once a condition matches.** +influxdb/v2.5/tags: [conditionals, flux] +menu: + influxdb_2_5: + name: Conditional logic + parent: Query with Flux +weight: 220 +aliases: + - /influxdb/v2.5/query-data/guides/conditional-logic/ +related: + - /influxdb/v2.5/query-data/flux/query-fields/ + - /{{< latest "flux" >}}/stdlib/universe/filter/ + - /{{< latest "flux" >}}/stdlib/universe/map/ + - /{{< latest "flux" >}}/stdlib/universe/reduce/ +list_code_example: | + ```js + if color == "green" then "008000" else "ffffff" + ``` +--- + +Flux provides `if`, `then`, and `else` conditional expressions that allow for powerful and flexible Flux queries. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +##### Conditional expression syntax +```js +// Pattern +if then else + +// Example +if color == "green" then "008000" else "ffffff" +``` + +Conditional expressions are most useful in the following contexts: + +- When defining variables. +- When using functions that operate on a single row at a time ( + [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/), + [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/), + [`reduce()`](/{{< latest "flux" >}}/stdlib/universe/reduce) ). + +## Evaluating conditional expressions + +Flux evaluates statements in order and stops evaluating once a condition matches. 
+ +For example, given the following statement: + +```js +if r._value > 95.0000001 and r._value <= 100.0 then + "critical" +else if r._value > 85.0000001 and r._value <= 95.0 then + "warning" +else if r._value > 70.0000001 and r._value <= 85.0 then + "high" +else + "normal" +``` + +When `r._value` is 96, the output is "critical" and the remaining conditions are not evaluated. + +## Examples + +- [Conditionally set the value of a variable](#conditionally-set-the-value-of-a-variable) +- [Create conditional filters](#create-conditional-filters) +- [Conditionally transform column values with map()](#conditionally-transform-column-values-with-map) +- [Conditionally increment a count with reduce()](#conditionally-increment-a-count-with-reduce) + +### Conditionally set the value of a variable +The following example sets the `overdue` variable based on the +`dueDate` variable's relation to `now()`. + +```js +dueDate = 2019-05-01 +overdue = if dueDate < now() then true else false +``` + +### Create conditional filters +The following example uses an example `metric` [dashboard variable](/influxdb/v2.5/visualize-data/variables/) +to change how the query filters data. +`metric` has three possible values: + +- Memory +- CPU +- Disk + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter( + fn: (r) => if v.metric == "Memory" then + r._measurement == "mem" and r._field == "used_percent" + else if v.metric == "CPU" then + r._measurement == "cpu" and r._field == "usage_user" + else if v.metric == "Disk" then + r._measurement == "disk" and r._field == "used_percent" + else + r._measurement != "", + ) +``` + + +### Conditionally transform column values with map() +The following example uses the [`map()` function](/{{< latest "flux" >}}/stdlib/universe/map/) +to conditionally transform column values. +It sets the `level` column to a specific string based on `_value` column. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[No Comments](#) +[Comments](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +from(bucket: "example-bucket") + |> range(start: -5m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> map( + fn: (r) => ({r with + level: if r._value >= 95.0000001 and r._value <= 100.0 then + "critical" + else if r._value >= 85.0000001 and r._value <= 95.0 then + "warning" + else if r._value >= 70.0000001 and r._value <= 85.0 then + "high" + else + "normal", + }), + ) +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +from(bucket: "example-bucket") + |> range(start: -5m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> map( + fn: (r) => ({ + // Retain all existing columns in the mapped row + r with + // Set the level column value based on the _value column + level: if r._value >= 95.0000001 and r._value <= 100.0 then + "critical" + else if r._value >= 85.0000001 and r._value <= 95.0 then + "warning" + else if r._value >= 70.0000001 and r._value <= 85.0 then + "high" + else + "normal", + }), + ) +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +### Conditionally increment a count with reduce() +The following example uses the [`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +and [`reduce()`](/{{< latest "flux" >}}/stdlib/universe/reduce/) +functions to count the number of records in every five minute window that exceed a defined threshold. 
+ +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[No Comments](#) +[Comments](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +threshold = 65.0 + +data = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> aggregateWindow( + every: 5m, + fn: (column, tables=<-) => tables + |> reduce( + identity: {above_threshold_count: 0.0}, + fn: (r, accumulator) => ({ + above_threshold_count: if r._value >= threshold then + accumulator.above_threshold_count + 1.0 + else + accumulator.above_threshold_count + 0.0, + }), + ), + ) +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +threshold = 65.0 + +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + // Aggregate data into 5 minute windows using a custom reduce() function + |> aggregateWindow( + every: 5m, + // Use a custom function in the fn parameter. + // The aggregateWindow fn parameter requires 'column' and 'tables' parameters. + fn: (column, tables=<-) => tables + |> reduce( + identity: {above_threshold_count: 0.0}, + fn: (r, accumulator) => ({ + // Conditionally increment above_threshold_count if + // r.value exceeds the threshold + above_threshold_count: if r._value >= threshold then + accumulator.above_threshold_count + 1.0 + else + accumulator.above_threshold_count + 0.0, + }), + ), + ) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} diff --git a/content/influxdb/v2.5/query-data/flux/cumulativesum.md b/content/influxdb/v2.5/query-data/flux/cumulativesum.md new file mode 100644 index 000000000..06d01a507 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/cumulativesum.md @@ -0,0 +1,70 @@ +--- +title: Query cumulative sum +seotitle: Query cumulative sum in Flux +list_title: Cumulative sum +description: > + Use the `cumulativeSum()` function to calculate a running total of values. +weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Cumulative sum +influxdb/v2.5/tags: [query, cumulative sum] +related: + - /{{< latest "flux" >}}/stdlib/universe/cumulativesum/ +list_query_example: cumulative_sum +--- + +Use the [`cumulativeSum()` function](/{{< latest "flux" >}}/stdlib/universe/cumulativesum/) +to calculate a running total of values. +`cumulativeSum` sums the values of subsequent records and returns each row updated with the summed total. + +{{< flex >}} +{{% flex-content "half" %}} +**Given the following input table:** + +| _time | _value | +| ----- |:------:| +| 0001 | 1 | +| 0002 | 2 | +| 0003 | 1 | +| 0004 | 3 | +{{% /flex-content %}} +{{% flex-content "half" %}} +**`cumulativeSum()` returns:** + +| _time | _value | +| ----- |:------:| +| 0001 | 1 | +| 0002 | 3 | +| 0003 | 4 | +| 0004 | 7 | +{{% /flex-content %}} +{{< /flex >}} + +{{% note %}} +The examples below use the [example data variable](/influxdb/v2.5/query-data/flux/#example-data-variable). +{{% /note %}} + +##### Calculate the running total of values +```js +data + |> cumulativeSum() +``` + +## Use cumulativeSum() with aggregateWindow() +[`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +segments data into windows of time, aggregates data in each window into a single +point, then removes the time-based segmentation. +It is primarily used to [downsample data](/influxdb/v2.5/process-data/common-tasks/downsample-data/). + +`aggregateWindow()` expects an aggregate function that returns a single row for each time window. 
+To use `cumulativeSum()` with `aggregateWindow`, use `sum` in `aggregateWindow()`, +then calculate the running total of the aggregate values with `cumulativeSum()`. + + +```js +data + |> aggregateWindow(every: 5m, fn: sum) + |> cumulativeSum() +``` diff --git a/content/influxdb/v2.5/query-data/flux/custom-functions/_index.md b/content/influxdb/v2.5/query-data/flux/custom-functions/_index.md new file mode 100644 index 000000000..e00d5493b --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/custom-functions/_index.md @@ -0,0 +1,223 @@ +--- +title: Create custom Flux functions +description: Create your own custom Flux functions to transform and operate on data. +list_title: Custom functions +influxdb/v2.5/tags: [functions, custom, flux] +menu: + influxdb_2_5: + name: Custom functions + parent: Query with Flux +weight: 220 +aliases: + - /influxdb/v2.5/query-data/guides/custom-functions/ +list_code_example: | + ```js + multByX = (tables=<-, x) => tables + |> map(fn: (r) => ({r with _value: r._value * x})) + + data + |> multByX(x: 2.0) + ``` +--- + +Flux's functional syntax lets you create custom functions. +This guide walks through the basics of creating your own function. + +- [Function definition syntax](#function-definition-syntax) +- [Use piped-forward data in a custom function](#use-piped-forward-data-in-a-custom-function) +- [Define parameter defaults](#define-parameter-defaults) +- [Define functions with scoped variables](#define-functions-with-scoped-variables) + +## Function definition syntax +The basic syntax for defining functions in Flux is as follows: + +```js +// Basic function definition syntax +functionName = (functionParameters) => functionOperations +``` + +##### functionName +The name used to call the function in your Flux script. + +##### functionParameters +A comma-separated list of parameters passed into the function and used in its operations. +[Parameter defaults](#define-parameter-defaults) can be defined for each. + +##### functionOperations +Operations and functions that manipulate the input into the desired output. + +#### Basic function examples + +###### Example square function +```js +// Function definition +square = (n) => n * n + +// Function usage +> square(n:3) +9 +``` + +###### Example multiply function +```js +// Function definition +multiply = (x, y) => x * y + +// Function usage +> multiply(x: 2, y: 15) +30 +``` + +## Use piped-forward data in a custom function +Most Flux functions process piped-forward data. +To process piped-forward data, one of the function +parameters must capture the input tables using the `<-` pipe-receive expression. + +In the example below, the `tables` parameter is assigned to the `<-` expression, +which represents all data piped-forward into the function. +`tables` is then piped-forward into other operations in the function definition. + +```js +functionName = (tables=<-) => tables |> functionOperations +``` + +#### Pipe-forwardable function example + +###### Multiply row values by x +The example below defines a `multByX` function that multiplies the `_value` column +of each row in the input table by the `x` parameter. +It uses the [`map()` function](/{{< latest "flux" >}}/stdlib/universe/map) +to modify each `_value`. 
+ +```js +// Function definition +multByX = (tables=<-, x) => tables + |> map(fn: (r) => ({r with _value: r._value * x})) + +// Function usage +from(bucket: "example-bucket") + |> range(start: -1m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> multByX(x: 2.0) +``` + +## Define parameter defaults +Use the `=` assignment operator to assign a default value to function parameters +in your function definition: + +```js +functionName = (param1=defaultValue1, param2=defaultValue2) => functionOperation +``` + +Defaults are overridden by explicitly defining the parameter in the function call. + +### Example functions with defaults + +#### Get a list of leaders +The example below defines a `leaderBoard` function that returns a limited number +of records sorted by values in specified columns. +It uses the [`sort()` function](/{{< latest "flux" >}}/stdlib/universe/sort) +to sort records in either descending or ascending order. +It then uses the [`limit()` function](/{{< latest "flux" >}}/stdlib/universe/limit) +to return a specified number of records from the sorted table. + +```js +// Function definition +leaderBoard = (tables=<-, limit=4, columns=["_value"], desc=true) => tables + |> sort(columns: columns, desc: desc) + |> limit(n: limit) + +// Function usage +// Get the 4 highest scoring players +from(bucket: "example-bucket") + |> range(start: -1m) + |> filter(fn: (r) => r._measurement == "player-stats" and r._field == "total-points") + |> leaderBoard() + +// Get the 10 shortest race times +from(bucket: "example-bucket") + |> range(start: -1m) + |> filter(fn: (r) => r._measurement == "race-times" and r._field == "elapsed-time") + |> leaderBoard(limit: 10, desc: false) +``` + +## Define functions with scoped variables +To create custom functions with variables scoped to the function, place your +function operations and variables inside of a [block (`{}`)](/influxdb/v2.5/reference/flux/language/blocks/) +and use a `return` statement to return a specific variable. + +```js +functionName = (functionParameters) => { + exampleVar = "foo" + + return exampleVar +} +``` + +### Example functions with scoped variables + +- [Return an alert level based on a value](#return-an-alert-level-based-on-a-value) +- [Convert a HEX color code to a name](#convert-a-hex-color-code-to-a-name) + +#### Return an alert level based on a value +The following function uses conditional logic to return an alert level based on +a numeric input value: + +```js +alertLevel = (v) => { + level = if float(v: v) >= 90.0 then + "crit" + else if float(v: v) >= 80.0 then + "warn" + else if float(v: v) >= 65.0 then + "info" + else + "ok" + + return level +} + +alertLevel(v: 87.3) +// Returns "warn" +``` + +#### Convert a HEX color code to a name +The following function converts a hexadecimal (HEX) color code to the equivalent HTML color name. +The functions uses the [Flux dictionary package](/{{< latest "flux" >}}/stdlib/dict/) +to create a dictionary of HEX codes and their corresponding names. 
+ +```js +import "dict" + +hexName = (hex) => { + hexNames = dict.fromList( + pairs: [ + {key: "#00ffff", value: "Aqua"}, + {key: "#000000", value: "Black"}, + {key: "#0000ff", value: "Blue"}, + {key: "#ff00ff", value: "Fuchsia"}, + {key: "#808080", value: "Gray"}, + {key: "#008000", value: "Green"}, + {key: "#00ff00", value: "Lime"}, + {key: "#800000", value: "Maroon"}, + {key: "#000080", value: "Navy"}, + {key: "#808000", value: "Olive"}, + {key: "#800080", value: "Purple"}, + {key: "#ff0000", value: "Red"}, + {key: "#c0c0c0", value: "Silver"}, + {key: "#008080", value: "Teal"}, + {key: "#ffffff", value: "White"}, + {key: "#ffff00", value: "Yellow"}, + ], + ) + name = dict.get(dict: hexNames, key: hex, default: "No known name") + + return name +} + +hexName(hex: "#000000") +// Returns "Black" + +hexName(hex: "#8b8b8b") +// Returns "No known name" +``` diff --git a/content/influxdb/v2.5/query-data/flux/custom-functions/custom-aggregate.md b/content/influxdb/v2.5/query-data/flux/custom-functions/custom-aggregate.md new file mode 100644 index 000000000..f8b24d69f --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/custom-functions/custom-aggregate.md @@ -0,0 +1,245 @@ +--- +title: Create custom aggregate functions +description: Create your own custom aggregate functions in Flux using the `reduce()` function. +influxdb/v2.5/tags: [functions, custom, flux, aggregates] +menu: + influxdb_2_5: + name: Custom aggregate functions + parent: Custom functions +weight: 301 +aliases: + - /influxdb/v2.5/query-data/guides/custom-functions/custom-aggregate/ +related: + - /i{{< latest "flux" >}}/stdlib/universe/reduce/ +--- + +To aggregate your data, use the Flux +[aggregate functions](/{{< latest "flux" >}}/function-types#aggregates) +or create custom aggregate functions using the +[`reduce()`function](/{{< latest "flux" >}}/stdlib/universe/reduce/). + +## Aggregate function characteristics +Aggregate functions all have the same basic characteristics: + +- They operate on individual input tables and transform all records into a single record. +- The output table has the same [group key](/{{< latest "flux" >}}/get-started/data-model/#group-key) as the input table. + +## How reduce() works +The `reduce()` function operates on one row at a time using the function defined in +the [`fn` parameter](/{{< latest "flux" >}}/stdlib/universe/reduce/#fn). +The `fn` function maps keys to specific values using two [records](/{{< latest "flux" >}}/data-types/composite/record/) +specified by the following parameters: + +| Parameter | Description | +|:---------: |:----------- | +| `r` | A record that represents the row or record. | +| `accumulator` | A record that contains values used in each row's aggregate calculation. | + +{{% note %}} +The `reduce()` function's [`identity` parameter](/{{< latest "flux" >}}/stdlib/universe/reduce/#identity) +defines the initial `accumulator` record. +{{% /note %}} + +### Example reduce() function +The following example `reduce()` function produces a sum and product of all values +in an input table. 
+ +```js +|> reduce( + fn: (r, accumulator) => ({ + sum: r._value + accumulator.sum, + product: r._value * accumulator.product + }), + identity: {sum: 0.0, product: 1.0}, +) +``` + +To illustrate how this function works, take this simplified table for example: + +| _time | _value | +|:----- | ------:| +| 2019-04-23T16:10:49Z | 1.6 | +| 2019-04-23T16:10:59Z | 2.3 | +| 2019-04-23T16:11:09Z | 0.7 | +| 2019-04-23T16:11:19Z | 1.2 | +| 2019-04-23T16:11:29Z | 3.8 | + +###### Input records +The `fn` function uses the data in the first row to define the `r` record. +It defines the `accumulator` record using the `identity` parameter. + +```js +r = { _time: 2019-04-23T16:10:49.00Z, _value: 1.6 } +accumulator = { sum : 0.0, product : 1.0 } +``` + +###### Key mappings +It then uses the `r` and `accumulator` records to populate values in the key mappings: +```js +// sum: r._value + accumulator.sum +sum: 1.6 + 0.0 + +// product: r._value * accumulator.product +product: 1.6 * 1.0 +``` + +###### Output record +This produces an output record with the following key value pairs: + +```js +{ sum: 1.6, product: 1.6 } +``` + +The function then processes the next row using this **output record** as the `accumulator`. + +{{% note %}} +Because `reduce()` uses the output record as the `accumulator` when processing the next row, +keys mapped in the `fn` function must match keys in the `identity` and `accumulator` records. +{{% /note %}} + +###### Processing the next row +```js +// Input records for the second row +r = { _time: 2019-04-23T16:10:59.00Z, _value: 2.3 } +accumulator = { sum : 1.6, product : 1.6 } + +// Key mappings for the second row +sum: 2.3 + 1.6 +product: 2.3 * 1.6 + +// Output record of the second row +{ sum: 3.9, product: 3.68 } +``` + +It then uses the new output record as the `accumulator` for the next row. +This cycle continues until all rows in the table are processed. + +##### Final output record and table +After all records in the table are processed, `reduce()` uses the final output record +to create a transformed table with one row and columns for each mapped key. + +###### Final output record +```js +{ sum: 9.6, product: 11.74656 } +``` + +###### Output table +| sum | product | +| --- | -------- | +| 9.6 | 11.74656 | + +{{% note %}} +#### What happened to the \_time column? +The `reduce()` function only keeps columns that are: + +1. Are part of the input table's [group key](/{{< latest "flux" >}}/get-started/data-model/#group-key). +2. Explicitly mapped in the `fn` function. + +It drops all other columns. +Because `_time` is not part of the group key and is not mapped in the `fn` function, +it isn't included in the output table. +{{% /note %}} + +## Custom aggregate function examples +To create custom aggregate functions, use principles outlined in +[Creating custom functions](/influxdb/v2.5/query-data/flux/custom-functions) +and the `reduce()` function to aggregate rows in each input table. + +### Create a custom average function +This example illustrates how to create a function that averages values in a table. +_This is meant for demonstration purposes only. 
+The built-in [`mean()` function](/{{< latest "flux" >}}/stdlib/universe/mean/) +does the same thing and is much more performant._ + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Comments](#) +[No Comments](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} + +```js +average = (tables=<-, outputField="average") => tables + |> reduce( + // Define the initial accumulator record + identity: {count: 0.0, sum: 0.0, avg: 0.0}, + fn: (r, accumulator) => ({ + // Increment the counter on each reduce loop + count: accumulator.count + 1.0, + // Add the _value to the existing sum + sum: accumulator.sum + r._value, + // Divide the existing sum by the existing count for a new average + avg: (accumulator.sum + r._value) / (accumulator.count + 1.0), + }), + ) + // Drop the sum and the count columns since they are no longer needed + |> drop(columns: ["sum", "count"]) + // Set the _field column of the output table to to the value + // provided in the outputField parameter + |> set(key: "_field", value: outputField) + // Rename avg column to _value + |> rename(columns: {avg: "_value"}) +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +average = (tables=<-, outputField="average") => tables + |> reduce( + identity: {count: 0.0, sum: 0.0, avg: 0.0}, + fn: (r, accumulator) => ({ + count: accumulator.count + 1.0, + sum: accumulator.sum + r._value, + avg: (accumulator.sum + r._value) / (accumulator.count + 1.0), + }), + ) + |> drop(columns: ["sum", "count"]) + |> set(key: "_field", value: outputField) + |> rename(columns: {avg: "_value"}) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +### Aggregate multiple columns +Built-in aggregate functions only operate on one column. +Use `reduce()` to create a custom aggregate function that aggregates multiple columns. + +The following function expects input tables to have `c1_value` and `c2_value` +columns and generates an average for each. + +```js +multiAvg = (tables=<-) => tables + |> reduce( + identity: { + count: 1.0, + c1_sum: 0.0, + c1_avg: 0.0, + c2_sum: 0.0, + c2_avg: 0.0, + }, + fn: (r, accumulator) => ({ + count: accumulator.count + 1.0, + c1_sum: accumulator.c1_sum + r.c1_value, + c1_avg: accumulator.c1_sum / accumulator.count, + c2_sum: accumulator.c2_sum + r.c2_value, + c2_avg: accumulator.c2_sum / accumulator.count, + }), + ) +``` + +### Aggregate gross and net profit +Use `reduce()` to create a function that aggregates gross and net profit. +This example expects `profit` and `expenses` columns in the input tables. + +```js +profitSummary = (tables=<-) => tables + |> reduce( + identity: {gross: 0.0, net: 0.0}, + fn: (r, accumulator) => ({ + gross: accumulator.gross + r.profit, + net: accumulator.net + r.profit - r.expenses + } + ) + ) +``` diff --git a/content/influxdb/v2.5/query-data/flux/exists.md b/content/influxdb/v2.5/query-data/flux/exists.md new file mode 100644 index 000000000..2e15e3954 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/exists.md @@ -0,0 +1,115 @@ +--- +title: Check if a value exists +seotitle: Use Flux to check if a value exists +list_title: Exists +description: > + Use the Flux `exists` operator to check if a row record contains a column or if + that column's value is `null`. 
+influxdb/v2.5/tags: [exists]
+menu:
+  influxdb_2_5:
+    name: Exists
+    parent: Query with Flux
+weight: 220
+aliases:
+  - /influxdb/v2.5/query-data/guides/exists/
+related:
+  - /influxdb/v2.5/query-data/flux/query-fields/
+  - /{{< latest "flux" >}}/stdlib/universe/filter/
+list_code_example: |
+  ##### Filter null values
+  ```js
+  data
+      |> filter(fn: (r) => exists r._value)
+  ```
+---
+
+Use the `exists` operator to check if a row record contains a column or if a
+column's value is _null_.
+
+```js
+(r) => exists r.column
+```
+
+If you're just getting started with Flux queries, check out the following:
+
+- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query.
+- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries.
+
+Use `exists` with row functions (
+[`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/),
+[`map()`](/{{< latest "flux" >}}/stdlib/universe/map/),
+[`reduce()`](/{{< latest "flux" >}}/stdlib/universe/reduce/))
+to check if a row includes a column or if the value for that column is _null_.
+
+#### Filter null values
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -5m)
+    |> filter(fn: (r) => exists r._value)
+```
+
+#### Map values based on existence
+
+```js
+from(bucket: "default")
+    |> range(start: -30s)
+    |> map(
+        fn: (r) => ({r with
+            human_readable: if exists r._value then
+                "${r._field} is ${string(v: r._value)}."
+            else
+                "${r._field} has no value.",
+        }),
+    )
+```
+
+#### Ignore null values in a custom aggregate function
+
+```js
+customSumProduct = (tables=<-) => tables
+    |> reduce(
+        identity: {sum: 0.0, product: 1.0},
+        fn: (r, accumulator) => ({r with
+            sum: if exists r._value then
+                r._value + accumulator.sum
+            else
+                accumulator.sum,
+            product: if exists r._value then
+                r._value * accumulator.product
+            else
+                accumulator.product,
+        }),
+    )
+```
+
+#### Check if a statically defined record contains a key
+
+When you use the [record literal syntax](/flux/v0.x/data-types/composite/record/#record-syntax)
+to statically define a record, Flux knows the record type and what keys to expect.
+
+- If the key exists in the static record, `exists` returns `true`.
+- If the key exists in the static record, but has a _null_ value, `exists` returns `false`.
+- If the key does not exist in the static record, because the record type is
+  statically known, `exists` returns an error.
+
+```js
+import "internal/debug"
+
+p = {
+    firstName: "John",
+    lastName: "Doe",
+    age: 42,
+    height: debug.null(type: "int"),
+}
+
+exists p.firstName
+// Returns true
+
+exists p.height
+// Returns false
+
+exists p.hairColor
+// Returns "error: record is missing label hairColor"
+```
diff --git a/content/influxdb/v2.5/query-data/flux/explore-schema.md b/content/influxdb/v2.5/query-data/flux/explore-schema.md
new file mode 100644
index 000000000..8a57eeeaf
--- /dev/null
+++ b/content/influxdb/v2.5/query-data/flux/explore-schema.md
@@ -0,0 +1,291 @@
+---
+title: Explore your data schema with Flux
+list_title: Explore your schema
+description: >
+  Flux provides functions that let you explore the structure and schema of your
+  data stored in InfluxDB.
+influxdb/v2.5/tags: [schema] +menu: + influxdb_2_5: + name: Explore your schema + parent: Query with Flux +weight: 206 +related: + - /{{< latest "flux" >}}/stdlib/universe/buckets/ + - /{{< latest "flux" >}}/stdlib/schema/measurements + - /{{< latest "flux" >}}/stdlib/schema/fieldkeys + - /{{< latest "flux" >}}/stdlib/schema/measurementfieldkeys + - /{{< latest "flux" >}}/stdlib/schema/tagkeys + - /{{< latest "flux" >}}/stdlib/schema/measurementtagkeys + - /{{< latest "flux" >}}/stdlib/schema/tagvalues + - /{{< latest "flux" >}}/stdlib/schema/measurementtagvalues +list_code_example: | + ```js + import "influxdata/influxdb/schema" + + // List buckets + buckets() + + // List measurements + schema.measurements(bucket: "example-bucket") + + // List field keys + schema.fieldKeys(bucket: "example-bucket") + + // List tag keys + schema.tagKeys(bucket: "example-bucket") + + // List tag values + schema.tagValues(bucket: "example-bucket", tag: "example-tag") + ``` +--- + +Flux provides functions that let you explore the structure and schema of your +data stored in InfluxDB. + +- [List buckets](#list-buckets) +- [List measurements](#list-measurements) +- [List field keys](#list-field-keys) +- [List tag keys](#list-tag-keys) +- [List tag values](#list-tag-values) + +{{% warn %}} +Functions in the `schema` package are not supported in the [Flux REPL](/influxdb/v2.5/tools/repl/). +{{% /warn %}} + +## List buckets +Use [`buckets()`](/{{< latest "flux" >}}/stdlib/universe/buckets/) +to list **buckets in your organization**. + +```js +buckets() +``` + +{{< expand-wrapper >}} +{{% expand "View example `buckets()` output" %}} + +`buckets()` returns a single table with the following columns: + +- **organizationID**: Organization ID +- **name**: Bucket name +- **id**: Bucket ID +- **retentionPolicy**: Retention policy associated with the bucket +- **retentionPeriod**: Retention period in nanoseconds + +| organizationID | name | id | retentionPolicy | retentionPeriod | +| :------------- | :--------------- | :------ | :-------------- | --------------: | +| XooX0x0 | _monitoring | XooX0x1 | | 604800000000000 | +| XooX0x0 | _tasks | XooX0x2 | | 259200000000000 | +| XooX0x0 | example-bucket-1 | XooX0x3 | | 0 | +| XooX0x0 | example-bucket-2 | XooX0x4 | | 0 | +| XooX0x0 | example-bucket-3 | XooX0x5 | | 172800000000000 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## List measurements +Use [`schema.measurements()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurements) +to list **measurements in a bucket**. +_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.measurements(bucket: "example-bucket") +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.measurements()` output" %}} + +`schema.measurements()` returns a single table with a `_value` column. +Each row contains the name of a measurement. + +| _value | +| :----- | +| m1 | +| m2 | +| m3 | +| m4 | +| m5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## List field keys +Use [`schema.fieldKeys`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/fieldkeys) +to list **field keys in a bucket**. +_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.fieldKeys(bucket: "example-bucket") +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.fieldKeys()` output" %}} + +`schema.fieldKeys()` returns a single table with a `_value` column. 
+Each row contains a unique field key from the specified bucket. + +| _value | +| :----- | +| field1 | +| field2 | +| field3 | +| field4 | +| field5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List fields in a measurement +Use [`schema.measurementFieldKeys`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementfieldkeys) +to list **field keys in a measurement**. +_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.measurementFieldKeys( + bucket: "example-bucket", + measurement: "example-measurement", +) +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.measurementFieldKeys()` output" %}} + +`schema.measurementFieldKeys()` returns a single table with a `_value` column. +Each row contains the name of a unique field key in the specified bucket and measurement. + +| _value | +| :----- | +| field1 | +| field2 | +| field3 | +| field4 | +| field5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## List tag keys +Use [`schema.tagKeys()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/tagkeys) +to list **tag keys in a bucket**. +_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.tagKeys(bucket: "example-bucket") +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.tagKeys()` output" %}} + +`schema.tagKeys()` returns a single table with a `_value` column. +Each row contains the a unique tag key from the specified bucket. + +| _value | +| :----- | +| tag1 | +| tag2 | +| tag3 | +| tag4 | +| tag5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List tag keys in a measurement +Use [`schema.measurementTagKeys`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagkeys) +to list **tag keys in a measurement**. +_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.measurementTagKeys( + bucket: "example-bucket", + measurement: "example-measurement", +) +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.measurementTagKeys()` output" %}} + +`schema.measurementTagKeys()` returns a single table with a `_value` column. +Each row contains a unique tag key from the specified bucket and measurement. + +| _value | +| :----- | +| tag1 | +| tag2 | +| tag3 | +| tag4 | +| tag5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## List tag values +Use [`schema.tagValues()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/tagvalues) +to list **tag values for a given tag in a bucket**. +_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.tagValues(bucket: "example-bucket", tag: "example-tag") +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.tagValues()` output" %}} + +`schema.tagValues()` returns a single table with a `_value` column. +Each row contains a unique tag value from the specified bucket and tag key. + +| _value | +| :-------- | +| tagValue1 | +| tagValue2 | +| tagValue3 | +| tagValue4 | +| tagValue5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List tag values in a measurement +Use [`schema.measurementTagValues`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagvalues) +to list **tag values for a given tag in a measurement**. 
+_By default, this function returns results from the last 30 days._ + +```js +import "influxdata/influxdb/schema" + +schema.measurementTagValues( + bucket: "example-bucket", + tag: "example-tag", + measurement: "example-measurement", +) +``` + +{{< expand-wrapper >}} +{{% expand "View example `schema.measurementTagValues()` output" %}} + +`schema.measurementTagValues()` returns a single table with a `_value` column. +Each row contains a unique tag value from the specified bucket, measurement, +and tag key. + +| _value | +| :-------- | +| tagValue1 | +| tagValue2 | +| tagValue3 | +| tagValue4 | +| tagValue5 | + +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/influxdb/v2.5/query-data/flux/fill.md b/content/influxdb/v2.5/query-data/flux/fill.md new file mode 100644 index 000000000..565cffb14 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/fill.md @@ -0,0 +1,113 @@ +--- +title: Fill null values in data +seotitle: Fill null values in data +list_title: Fill +description: > + Use `fill()` function to replace _null_ values. +weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Fill +influxdb/v2.5/tags: [query, fill] +related: + - /{{< latest "flux" >}}/stdlib/universe/fill/ +list_query_example: fill_null +--- + +Use [`fill()`](/{{< latest "flux" >}}/stdlib/universe/fill/) +to replace _null_ values with: + +- [the previous non-null value](#fill-with-the-previous-value) +- [a specified value](#fill-with-a-specified-value) + + +```js +data + |> fill(usePrevious: true) + +// OR + +data + |> fill(value: 0.0) +``` + +{{% note %}} +#### Fill empty windows of time +The `fill()` function **does not** fill empty windows of time. +It only replaces _null_ values in existing data. +Filling empty windows of time requires time interpolation +_(see [influxdata/flux#2428](https://github.com/influxdata/flux/issues/2428))_. +{{% /note %}} + +## Fill with the previous value +To fill _null_ values with the previous **non-null** value, set the `usePrevious` parameter to `true`. + +{{% note %}} +Values remain _null_ if there is no previous non-null value in the table. +{{% /note %}} + +```js +data + |> fill(usePrevious: true) +``` + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | null | +| 2020-01-01T00:02:00Z | 0.8 | +| 2020-01-01T00:03:00Z | null | +| 2020-01-01T00:04:00Z | null | +| 2020-01-01T00:05:00Z | 1.4 | +{{% /flex-content %}} +{{% flex-content %}} +**`fill(usePrevious: true)` returns:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | null | +| 2020-01-01T00:02:00Z | 0.8 | +| 2020-01-01T00:03:00Z | 0.8 | +| 2020-01-01T00:04:00Z | 0.8 | +| 2020-01-01T00:05:00Z | 1.4 | +{{% /flex-content %}} +{{< /flex >}} + +## Fill with a specified value +To fill _null_ values with a specified value, use the `value` parameter to specify the fill value. 
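+
+Besides the default `_value` column, any column can be filled by also passing the `column` parameter. For example, a hypothetical string `status` column could be filled with a placeholder value (a sketch, not tied to the example data set):
+
+```js
+data
+    |> fill(column: "status", value: "unknown")
+```
+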
+_The fill value must match the [data type](/{{< latest "flux" >}}/spec/types/#basic-types) +of the [column](/{{< latest "flux" >}}/stdlib/universe/fill/#column)._ + +```js +data + |> fill(value: 0.0) +``` + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | null | +| 2020-01-01T00:02:00Z | 0.8 | +| 2020-01-01T00:03:00Z | null | +| 2020-01-01T00:04:00Z | null | +| 2020-01-01T00:05:00Z | 1.4 | +{{% /flex-content %}} +{{% flex-content %}} +**`fill(value: 0.0)` returns:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 0.0 | +| 2020-01-01T00:02:00Z | 0.8 | +| 2020-01-01T00:03:00Z | 0.0 | +| 2020-01-01T00:04:00Z | 0.0 | +| 2020-01-01T00:05:00Z | 1.4 | +{{% /flex-content %}} +{{< /flex >}} diff --git a/content/influxdb/v2.5/query-data/flux/first-last.md b/content/influxdb/v2.5/query-data/flux/first-last.md new file mode 100644 index 000000000..65469b7d1 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/first-last.md @@ -0,0 +1,145 @@ +--- +title: Query first and last values +seotitle: Query first and last values in Flux +list_title: First and last +description: > + Use `first()` or `last()` to return the first or last point in an input table. +weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: First & last +influxdb/v2.5/tags: [query] +related: + - /{{< latest "flux" >}}/stdlib/universe/first/ + - /{{< latest "flux" >}}/stdlib/universe/last/ +list_query_example: first_last +--- + +Use [`first()`](/{{< latest "flux" >}}/stdlib/universe/first/) or +[`last()`](/{{< latest "flux" >}}/stdlib/universe/last/) to return the first or +last record in an input table. + +```js +data + |> first() + +// OR + +data + |> last() +``` + +{{% note %}} +By default, InfluxDB returns results sorted by time, however you can use the +[`sort()` function](/{{< latest "flux" >}}/stdlib/universe/sort/) +to change how results are sorted. +`first()` and `last()` respect the sort order of input data and return records +based on the order they are received in. +{{% /note %}} + +### first +`first()` returns the first non-null record in an input table. + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**The following function returns:** +```js +|> first() +``` + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +{{% /flex-content %}} +{{< /flex >}} + +### last +`last()` returns the last non-null record in an input table. + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**The following function returns:** + +```js +|> last() +``` + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{< /flex >}} + +## Use first() or last() with aggregateWindow() +Use `first()` and `last()` with [`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +to select the first or last records in time-based groups. 
+`aggregateWindow()` segments data into windows of time, aggregates data in each window into a single +point using aggregate or selector functions, and then removes the time-based segmentation. + + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:00Z | 10 | +| 2020-01-01T00:00:15Z | 12 | +| 2020-01-01T00:00:45Z | 9 | +| 2020-01-01T00:01:05Z | 9 | +| 2020-01-01T00:01:10Z | 15 | +| 2020-01-01T00:02:30Z | 11 | +{{% /flex-content %}} + +{{% flex-content %}} +**The following function returns:** +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[first](#) +[last](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +|> aggregateWindow(every: 1h, fn: first) +``` +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:59Z | 10 | +| 2020-01-01T00:01:59Z | 9 | +| 2020-01-01T00:02:59Z | 11 | +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +|> aggregateWindow(every: 1h, fn: last) +``` + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:59Z | 9 | +| 2020-01-01T00:01:59Z | 15 | +| 2020-01-01T00:02:59Z | 11 | +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} +{{%/flex-content %}} +{{< /flex >}} diff --git a/content/influxdb/v2.5/query-data/flux/flux-version.md b/content/influxdb/v2.5/query-data/flux/flux-version.md new file mode 100644 index 000000000..8946e15ed --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/flux-version.md @@ -0,0 +1,145 @@ +--- +title: Query the Flux version +seotitle: Query the version of Flux installed in InfluxDB +list_title: Query the Flux version +description: > + Use `runtime.version()` to return the version of Flux installed in InfluxDB. +weight: 221 +menu: + influxdb_2_5: + parent: Query with Flux + name: Flux version +influxdb/v2.5/tags: [query] +related: + - /{{< latest "flux" >}}/stdlib/runtime/version/ +list_code_example: | + ```js + import "array" + import "runtime" + + array.from(rows: [{version: runtime.version()}]) + ``` +--- + +InfluxDB {{< current-version >}} includes specific version of Flux that may or +may not support documented Flux functionality. +It's important to know what version of Flux you're currently using and what +functions are supported in that specific version. + +To query the version of Flux installed with InfluxDB, use `array.from()` to +create an ad hoc stream of tables and `runtime.version()` to populate a column +with the Flux version. + +{{% note %}} +Because the InfluxDB `/api/v2/query` endpoint can only return a stream of tables +and not single scalar values, you must use `array.from()` to create a stream of tables. +{{% /note %}} + +Run the following query in the **InfluxDB user interface**, with the **`influx` CLI**, +or **InfluxDB API**: + +```js +import "array" +import "runtime" + +array.from(rows: [{version: runtime.version()}]) +``` + +{{< tabs-wrapper >}} +{{% tabs %}} +[InfluxDB UI](#) +[influx CLI](#) +[InfluxDB API](#) +{{% /tabs %}} +{{% tab-content %}} + +To return the version of Flux installed with InfluxDB using the InfluxDB UI: + +1. Click **Data Explorer** in the left navigation bar. + + {{< nav-icon "data-explorer" >}} + +2. Click **{{% caps %}}Script Editor{{% /caps %}}** to manually create and + edit a Flux query. +3. Enable the **View Raw Data {{< icon "toggle" >}}** toggle or select one of the + following visualization types: + + - [Single Stat](/influxdb/v2.5/visualize-data/visualization-types/single-stat/) + - [Table](/influxdb/v2.5/visualize-data/visualization-types/table/) + +4. 
Enter and run the following query: + + ```js + import "array" + import "runtime" + + array.from(rows: [{version: runtime.version()}]) + ``` + +{{% /tab-content %}} +{{% tab-content %}} + +To return the version of Flux installed with InfluxDB using the `influx` CLI, +use the `influx query` command. Provide the following: + +- InfluxDB **host**, **organization**, and **API token** + _(the example below assumes that a + [CLI configuration](/influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials) + is set up and active)_ +- Query to execute + +```sh +$ influx query \ + 'import "array" + import "runtime" + + array.from(rows: [{version: runtime.version()}])' + +# Output +Result: _result +Table: keys: [] + version:string +---------------------- + v0.161.0 +``` +{{% /tab-content %}} + +{{% tab-content %}} + +To return the version of Flux installed with InfluxDB using the InfluxDB API, +use the [`/api/v2/query` endpoint](/influxdb/v2.5/api/#tag/Query). + +{{< api-endpoint method="POST" endpoint="http://localhost:8086/api/v2/query" >}} +Provide the following: + +- InfluxDB {{% cloud-only %}}Cloud{{% /cloud-only %}} host +- InfluxDB organization name or ID as a query parameter +- `Authorization` header with the `Token` scheme and your API token +- `Accept: application/csv` header +- `Content-type: application/vnd.flux` header +- Query to execute as the request body + +```sh +curl --request POST \ + http://localhost:8086/api/v2/query?orgID=INFLUX_ORG_ID \ + --header 'Authorization: Token INFLUX_TOKEN' \ + --header 'Accept: application/csv' \ + --header 'Content-type: application/vnd.flux' \ + --data 'import "array" + import "runtime" + + array.from(rows: [{version: runtime.version()}])' + +# Output +,result,table,version +,_result,0,v0.161.0 +``` + +{{% /tab-content %}} + +{{% warn %}} +#### Flux version in the Flux REPL +When you run `runtime.version()` in the [Flux REPL](/influxdb/v2.5/tools/flux-repl/), +the function returns the version of Flux the REPL was built with, not the version +of Flux installed in the instance of InfluxDB you're querying. +{{% /warn %}} \ No newline at end of file diff --git a/content/influxdb/v2.5/query-data/flux/geo/_index.md b/content/influxdb/v2.5/query-data/flux/geo/_index.md new file mode 100644 index 000000000..497fde268 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/geo/_index.md @@ -0,0 +1,69 @@ +--- +title: Work with geo-temporal data +list_title: Geo-temporal data +description: > + Use the Flux Geo package to filter geo-temporal data and group by geographic location or track. +menu: + influxdb_2_5: + name: Geo-temporal data + parent: Query with Flux +weight: 220 +list_code_example: | + ```js + import "experimental/geo" + + sampleGeoData + |> geo.filterRows(region: {lat: 30.04, lon: 31.23, radius: 200.0}) + |> geo.groupByArea(newColumn: "geoArea", level: 5) + ``` +--- + +Use the [Flux Geo package](/{{< latest "flux" >}}/stdlib/experimental/geo) to +filter geo-temporal data and group by geographic location or track. + +{{% warn %}} +The Geo package is experimental and subject to change at any time. +By using it, you agree to the [risks of experimental functions](/{{< latest "flux" >}}/stdlib/experimental/to/#experimental-functions-are-subject-to-change). +{{% /warn %}} + +**To work with geo-temporal data:** + +1. Import the `experimental/geo` package. + + ```js + import "experimental/geo" + ``` + +2. Load geo-temporal data. _See below for [sample geo-temporal data](#sample-data)._ +3. 
Do one or more of the following: + + - [Shape data to work with the Geo package](#shape-data-to-work-with-the-geo-package) + - [Filter data by region](#filter-geo-temporal-data-by-region) (using strict or non-strict filters) + - [Group data by area or by track](#group-geo-temporal-data) + +{{< children >}} + +--- + +## Sample data +Many of the examples in this section use a `sampleGeoData` variable that represents +a sample set of geo-temporal data. +The [Bird Migration Sample Data](/influxdb/v2.5/reference/sample-data/#bird-migration-sample-data) +provides sample geo-temporal data that meets the +[requirements of the Flux Geo package](/{{< latest "flux" >}}/stdlib/experimental/geo/#geo-schema-requirements). + +### Load bird migration sample data +Use the [`sample.data()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/data/) +to load the sample bird migration data: + +```js +import "influxdata/influxdb/sample" + +sampleGeoData = sample.data(set: "birdMigration") +``` + +{{% note %}} +`sample.data()` downloads sample data each time you execute the query **(~1.3 MB)**. +If bandwidth is a concern, use the [`to()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/to/) +to write the data to a bucket, and then query the bucket with [`from()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from/). +{{% /note %}} diff --git a/content/influxdb/v2.5/query-data/flux/geo/filter-by-region.md b/content/influxdb/v2.5/query-data/flux/geo/filter-by-region.md new file mode 100644 index 000000000..965edbdad --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/geo/filter-by-region.md @@ -0,0 +1,124 @@ +--- +title: Filter geo-temporal data by region +description: > + Use the `geo.filterRows` function to filter geo-temporal data by box-shaped, circular, or polygonal geographic regions. +menu: + influxdb_2_5: + name: Filter by region + parent: Geo-temporal data +weight: 302 +related: + - /{{< latest "flux" >}}/stdlib/experimental/geo/ + - /{{< latest "flux" >}}/stdlib/experimental/geo/filterrows/ +list_code_example: | + ```js + import "experimental/geo" + + sampleGeoData + |> geo.filterRows(region: {lat: 30.04, lon: 31.23, radius: 200.0}, strict: true) + ``` +--- + +Use the [`geo.filterRows` function](/{{< latest "flux" >}}/stdlib/experimental/geo/filterrows/) +to filter geo-temporal data by geographic region: + +1. [Define a geographic region](#define-a-geographic-region) +2. [Use strict or non-strict filtering](#strict-and-non-strict-filtering) + +The following example uses the [sample bird migration data](/influxdb/v2.5/query-data/flux/geo/#sample-data) +and queries data points **within 200km of Cairo, Egypt**: + +```js +import "experimental/geo" + +sampleGeoData + |> geo.filterRows(region: {lat: 30.04, lon: 31.23, radius: 200.0}, strict: true) +``` + +## Define a geographic region +Many functions in the Geo package filter data based on geographic region. 
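+
+A region is an ordinary Flux record, so it can be assigned to a variable and reused across calls. The following sketch uses the sample data and the same circular region as the example above:
+
+```js
+import "experimental/geo"
+
+cairo = {lat: 30.04, lon: 31.23, radius: 200.0}
+
+sampleGeoData
+    |> geo.filterRows(region: cairo, strict: true)
+```
+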
+Define a geographic region using one of the the following shapes: + +- [box](#box) +- [circle](#circle) +- [polygon](#polygon) + +### box +Define a box-shaped region by specifying a record containing the following properties: + +- **minLat:** minimum latitude in decimal degrees (WGS 84) _(Float)_ +- **maxLat:** maximum latitude in decimal degrees (WGS 84) _(Float)_ +- **minLon:** minimum longitude in decimal degrees (WGS 84) _(Float)_ +- **maxLon:** maximum longitude in decimal degrees (WGS 84) _(Float)_ + +##### Example box-shaped region +```js +{ + minLat: 40.51757813, + maxLat: 40.86914063, + minLon: -73.65234375, + maxLon: -72.94921875, +} +``` + +### circle +Define a circular region by specifying a record containing the following properties: + +- **lat**: latitude of the circle center in decimal degrees (WGS 84) _(Float)_ +- **lon**: longitude of the circle center in decimal degrees (WGS 84) _(Float)_ +- **radius**: radius of the circle in kilometers (km) _(Float)_ + +##### Example circular region +```js +{ + lat: 40.69335938, + lon: -73.30078125, + radius: 20.0, +} +``` + +### polygon +Define a polygonal region with a record containing the latitude and longitude for +each point in the polygon: + +- **points**: points that define the custom polygon _(Array of records)_ + + Define each point with a record containing the following properties: + + - **lat**: latitude in decimal degrees (WGS 84) _(Float)_ + - **lon**: longitude in decimal degrees (WGS 84) _(Float)_ + +##### Example polygonal region +```js +{ + points: [ + {lat: 40.671659, lon: -73.936631}, + {lat: 40.706543, lon: -73.749177}, + {lat: 40.791333, lon: -73.880327}, + ] +} +``` + +## Strict and non-strict filtering +In most cases, the specified geographic region does not perfectly align with S2 grid cells. + +- **Non-strict filtering** returns points that may be outside of the specified region but + inside S2 grid cells partially covered by the region. +- **Strict filtering** returns only points inside the specified region. + +_Strict filtering is less performant, but more accurate than non-strict filtering._ + + S2 grid cell + Filter region + Returned point + +{{< flex >}} +{{% flex-content %}} +**Strict filtering** +{{< svg "/static/svgs/geo-strict.svg" >}} +{{% /flex-content %}} +{{% flex-content %}} +**Non-strict filtering** +{{< svg "/static/svgs/geo-non-strict.svg" >}} +{{% /flex-content %}} +{{< /flex >}} diff --git a/content/influxdb/v2.5/query-data/flux/geo/group-geo-data.md b/content/influxdb/v2.5/query-data/flux/geo/group-geo-data.md new file mode 100644 index 000000000..bfe4e5592 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/geo/group-geo-data.md @@ -0,0 +1,73 @@ +--- +title: Group geo-temporal data +description: > + Use the `geo.groupByArea()` to group geo-temporal data by area and `geo.asTracks()` + to group data into tracks or routes. +menu: + influxdb_2_5: + parent: Geo-temporal data +weight: 302 +related: + - /{{< latest "flux" >}}/stdlib/experimental/geo/ + - /{{< latest "flux" >}}/stdlib/experimental/geo/groupbyarea/ + - /{{< latest "flux" >}}/stdlib/experimental/geo/astracks/ +list_code_example: | + ```js + import "experimental/geo" + + sampleGeoData + |> geo.groupByArea(newColumn: "geoArea", level: 5) + |> geo.asTracks(groupBy: ["id"],sortBy: ["_time"]) + ``` +--- + +Use the `geo.groupByArea()` to group geo-temporal data by area and `geo.asTracks()` +to group data into tracks or routes. 
+ +- [Group data by area](#group-data-by-area) +- [Group data into tracks or routes](#group-data-by-track-or-route) + +{{% note %}} +For example results, use the [bird migration sample data](/influxdb/v2.5/reference/sample-data/#bird-migration-sample-data) +to populate the `sampleGeoData` variable in the queries below. +{{% /note %}} + +### Group data by area +Use the [`geo.groupByArea()` function](/{{< latest "flux" >}}/stdlib/experimental/geo/groupbyarea/) +to group geo-temporal data points by geographic area. +Areas are determined by [S2 grid cells](https://s2geometry.io/devguide/s2cell_hierarchy.html#s2cellid-numbering) + +- Specify a new column to store the unique area identifier for each point with the `newColumn` parameter. +- Specify the [S2 cell level](https://s2geometry.io/resources/s2cell_statistics) + to use when calculating geographic areas with the `level` parameter. + +The following example uses the [sample bird migration data](/influxdb/v2.5/query-data/flux/geo/#sample-data) +to query data points within 200km of Cairo, Egypt and group them by geographic area: + +```js +import "experimental/geo" + +sampleGeoData + |> geo.filterRows(region: {lat: 30.04, lon: 31.23, radius: 200.0}) + |> geo.groupByArea(newColumn: "geoArea", level: 5) +``` + +### Group data by track or route +Use [`geo.asTracks()` function](/{{< latest "flux" >}}/stdlib/experimental/geo/astracks/) +to group data points into tracks or routes and order them by time or other columns. +Data must contain a unique identifier for each track. For example: `id` or `tid`. + +- Specify columns that uniquely identify each track or route with the `groupBy` parameter. +- Specify which columns to sort by with the `sortBy` parameter. Default is `["_time"]`. + +The following example uses the [sample bird migration data](/influxdb/v2.5/query-data/flux/geo/#sample-data) +to query data points within 200km of Cairo, Egypt and group them into routes unique +to each bird: + +```js +import "experimental/geo" + +sampleGeoData + |> geo.filterRows(region: {lat: 30.04, lon: 31.23, radius: 200.0}) + |> geo.asTracks(groupBy: ["id"], orderBy: ["_time"]) +``` diff --git a/content/influxdb/v2.5/query-data/flux/geo/shape-geo-data.md b/content/influxdb/v2.5/query-data/flux/geo/shape-geo-data.md new file mode 100644 index 000000000..ad6babce1 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/geo/shape-geo-data.md @@ -0,0 +1,120 @@ +--- +title: Shape data to work with the Geo package +description: > + Functions in the Flux Geo package require **lat** and **lon** fields and an **s2_cell_id** tag. + Rename latitude and longitude fields and generate S2 cell ID tokens. 
+menu: + influxdb_2_5: + name: Shape geo-temporal data + parent: Geo-temporal data +weight: 301 +related: + - /{{< latest "flux" >}}/stdlib/experimental/geo/ + - /{{< latest "flux" >}}/stdlib/experimental/geo/shapedata/ +list_code_example: | + ```js + import "experimental/geo" + + sampleGeoData + |> geo.shapeData(latField: "latitude", lonField: "longitude", level: 10) + ``` +--- + +Functions in the Geo package require the following data schema: + +- an **s2_cell_id** tag containing the [S2 Cell ID](https://s2geometry.io/devguide/s2cell_hierarchy.html#s2cellid-numbering) + **as a token** +- a **`lat` field** field containing the **latitude in decimal degrees** (WGS 84) +- a **`lon` field** field containing the **longitude in decimal degrees** (WGS 84) + +## Shape geo-temporal data +If your data already contains latitude and longitude fields, use the +[`geo.shapeData()`function](/{{< latest "flux" >}}/stdlib/experimental/geo/shapedata/) +to rename the fields to match the requirements of the Geo package, pivot the data +into row-wise sets, and generate S2 cell ID tokens for each point. + +```js +import "experimental/geo" + +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> geo.shapeData(latField: "latitude", lonField: "longitude", level: 10) +``` + +## Generate S2 cell ID tokens +The Geo package uses the [S2 Geometry Library](https://s2geometry.io/) to represent +geographic coordinates on a three-dimensional sphere. +The sphere is divided into [cells](https://s2geometry.io/devguide/s2cell_hierarchy), +each with a unique 64-bit identifier (S2 cell ID). +Grid and S2 cell ID accuracy are defined by a [level](https://s2geometry.io/resources/s2cell_statistics). + +{{% note %}} +To filter more quickly, use higher S2 Cell ID levels, +but know that that higher levels increase [series cardinality](/influxdb/v2.5/reference/glossary/#series-cardinality). +{{% /note %}} + +The Geo package requires S2 cell IDs as tokens. +To generate add S2 cell IDs tokens to your data, use one of the following options: + +- [Generate S2 cell ID tokens with Telegraf](#generate-s2-cell-id-tokens-with-telegraf) +- [Generate S2 cell ID tokens language-specific libraries](#generate-s2-cell-id-tokens-language-specific-libraries) +- [Generate S2 cell ID tokens with Flux](#generate-s2-cell-id-tokens-with-flux) + +### Generate S2 cell ID tokens with Telegraf +Enable the [Telegraf S2 Geo (`s2geo`) processor](https://github.com/influxdata/telegraf/tree/master/plugins/processors/s2geo) +to generate S2 cell ID tokens at a specified `cell_level` using `lat` and `lon` field values. + +Add the `processors.s2geo` configuration to your Telegraf configuration file (`telegraf.conf`): + +```toml +[[processors.s2geo]] + ## The name of the lat and lon fields containing WGS-84 latitude and + ## longitude in decimal degrees. + lat_field = "lat" + lon_field = "lon" + + ## New tag to create + tag_key = "s2_cell_id" + + ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) + cell_level = 9 +``` + +Telegraf stores the S2 cell ID token in the `s2_cell_id` tag. + +### Generate S2 cell ID tokens language-specific libraries +Many programming languages offer S2 Libraries with methods for generating S2 cell ID tokens. +Use latitude and longitude with the `s2.CellID.ToToken` endpoint of the S2 Geometry +Library to generate `s2_cell_id` tags. 
For example: + +- **Go:** [s2.CellID.ToToken()](https://godoc.org/github.com/golang/geo/s2#CellID.ToToken) +- **Python:** [s2sphere.CellId.to_token()](https://s2sphere.readthedocs.io/en/latest/api.html#s2sphere.CellId) +- **Crystal:** [cell.to_token(level)](https://github.com/spider-gazelle/s2_cells#usage) +- **JavaScript:** [s2.cellid.toToken()](https://github.com/mapbox/node-s2/blob/master/API.md#cellidtotoken---string) + +### Generate S2 cell ID tokens with Flux +Use the [`geo.s2CellIDToken()` function](/{{< latest "flux" >}}/stdlib/experimental/geo/s2cellidtoken/) +with existing longitude (`lon`) and latitude (`lat`) field values to generate and add the S2 cell ID token. +First, use the [`geo.toRows()` function](/{{< latest "flux" >}}/stdlib/experimental/geo/torows/) +to pivot **lat** and **lon** fields into row-wise sets: + +```js +import "experimental/geo" + +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> geo.toRows() + |> map( + fn: (r) => ({ + r with + s2_cell_id: geo.s2CellIDToken(point: {lon: r.lon, lat: r.lat}, level: 10) + }) + ) +``` + +{{% note %}} +The [`geo.shapeData()`function](/{{< latest "flux" >}}/stdlib/experimental/geo/shapedata/) +generates S2 cell ID tokens as well. +{{% /note %}} diff --git a/content/influxdb/v2.5/query-data/flux/group-data.md b/content/influxdb/v2.5/query-data/flux/group-data.md new file mode 100644 index 000000000..46bc9b920 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/group-data.md @@ -0,0 +1,676 @@ +--- +title: Group data in InfluxDB with Flux +list_title: Group +description: > + Use `group()` to group data with common values in specific columns. +influxdb/v2.5/tags: [group] +menu: + influxdb_2_5: + name: Group + parent: Query with Flux +weight: 202 +aliases: + - /influxdb/v2.5/query-data/guides/group-data/ + - /influxdb/v2.5/query-data/flux/grouping-data/ +related: + - /{{< latest "flux" >}}/stdlib/universe/group + - /{{< latest "flux" >}}/stdlib/experimental/group +list_query_example: group +--- + +With Flux, you can group data by any column in your queried data set. +"Grouping" partitions data into tables in which each row shares a common value for specified columns. +This guide walks through grouping data in Flux and provides examples of how data is shaped in the process. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +## Group keys +Every table has a **group key** – a list of columns for which every row in the table has the same value. + +###### Example group key +```js +[_start, _stop, _field, _measurement, host] +``` + +Grouping data in Flux is essentially defining the group key of output tables. +Understanding how modifying group keys shapes output data is key to successfully +grouping and transforming data into your desired output. + +## group() Function +Flux's [`group()` function](/{{< latest "flux" >}}/stdlib/universe/group) defines the +group key for output tables, i.e. grouping records based on values for specific columns. 
+ +###### group() example +```js +dataStream + |> group(columns: ["cpu", "host"]) +``` + +###### Resulting group key +```js +[cpu, host] +``` + +The `group()` function has the following parameters: + +### columns +The list of columns to include or exclude (depending on the [mode](#mode)) in the grouping operation. + +### mode +The method used to define the group and resulting group key. +Possible values include `by` and `except`. + + +## Example grouping operations +To illustrate how grouping works, define a `dataSet` variable that queries System +CPU usage from the `example-bucket` bucket. +Filter the `cpu` tag so it only returns results for each numbered CPU core. + +### Data set +CPU used by system operations for all numbered CPU cores. +It uses a regular expression to filter only numbered cores. + +```js +dataSet = from(bucket: "example-bucket") + |> range(start: -2m) + |> filter(fn: (r) => r._field == "usage_system" and r.cpu =~ /cpu[0-9*]/) + |> drop(columns: ["host"]) +``` + +{{% note %}} +This example drops the `host` column from the returned data since the CPU data +is only tracked for a single host and it simplifies the output tables. +Don't drop the `host` column if monitoring multiple hosts. +{{% /note %}} + +{{% truncate %}} +``` +Table: keys: [_start, _stop, _field, _measurement, cpu] + _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:00.000000000Z 7.892107892107892 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:10.000000000Z 7.2 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:20.000000000Z 7.4 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:30.000000000Z 5.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:40.000000000Z 7.4 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:50.000000000Z 7.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:00.000000000Z 10.3 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:10.000000000Z 9.2 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:20.000000000Z 8.4 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:30.000000000Z 8.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:40.000000000Z 8.6 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:50.000000000Z 10.2 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:36:00.000000000Z 10.6 + +Table: keys: [_start, _stop, _field, _measurement, cpu] + _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ---------------------------- 
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:00.000000000Z 0.7992007992007992 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:10.000000000Z 0.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:20.000000000Z 0.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:30.000000000Z 0.4 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:40.000000000Z 0.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:50.000000000Z 0.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:00.000000000Z 1.4 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:10.000000000Z 1.2 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:20.000000000Z 0.8 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:30.000000000Z 0.8991008991008991 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:40.000000000Z 0.8008008008008008 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:50.000000000Z 0.999000999000999 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:36:00.000000000Z 1.1022044088176353 + +Table: keys: [_start, _stop, _field, _measurement, cpu] + _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:00.000000000Z 4.1 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:10.000000000Z 3.6 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:20.000000000Z 3.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:30.000000000Z 2.6 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:40.000000000Z 4.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:50.000000000Z 4.895104895104895 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:00.000000000Z 6.906906906906907 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:10.000000000Z 5.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:20.000000000Z 5.1 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:30.000000000Z 4.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:40.000000000Z 5.1 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:50.000000000Z 5.9 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 
2018-11-05T21:36:00.000000000Z 6.4935064935064934 + +Table: keys: [_start, _stop, _field, _measurement, cpu] + _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:00.000000000Z 0.5005005005005005 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:10.000000000Z 0.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:20.000000000Z 0.5 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:30.000000000Z 0.3 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:40.000000000Z 0.6 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:50.000000000Z 0.6 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:00.000000000Z 1.3986013986013985 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:10.000000000Z 0.9 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:20.000000000Z 0.5005005005005005 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:30.000000000Z 0.7 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:40.000000000Z 0.6 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:50.000000000Z 0.8 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:36:00.000000000Z 0.9 +``` +{{% /truncate %}} + +**Note that the group key is output with each table: `Table: keys: `.** + +![Group example data set](/img/flux/grouping-data-set.png) + +### Group by CPU +Group the `dataSet` stream by the `cpu` column. + +```js +dataSet + |> group(columns: ["cpu"]) +``` + +This won't actually change the structure of the data since it already has `cpu` +in the group key and is therefore grouped by `cpu`. 
+However, notice that it does change the group key: + +{{% truncate %}} +###### Group by CPU output tables +``` +Table: keys: [cpu] + cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time +---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 7.892107892107892 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 7.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 5.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 7.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 10.3 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 9.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 8.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 8.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 8.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 10.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [cpu] + cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time +---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 0.7992007992007992 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 0.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 1.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 1.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 0.8991008991008991 usage_system cpu 
2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 0.8008008008008008 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 0.999000999000999 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.1022044088176353 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [cpu] + cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time +---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 4.1 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 3.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 3.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 2.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 4.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 4.895104895104895 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 6.906906906906907 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 5.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 4.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 5.9 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 6.4935064935064934 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [cpu] + cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time +---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 0.3 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 1.3986013986013985 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 
2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z + cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z +``` +{{% /truncate %}} + +The visualization remains the same. + +![Group by CPU](/img/flux/grouping-data-set.png) + +### Group by time +Grouping data by the `_time` column is a good illustration of how grouping changes the structure of your data. + +```js +dataSet + |> group(columns: ["_time"]) +``` + +When grouping by `_time`, all records that share a common `_time` value are grouped into individual tables. +So each output table represents a single point in time. + +{{% truncate %}} +###### Group by time output tables +``` +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.892107892107892 usage_system cpu cpu0 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7992007992007992 usage_system cpu cpu1 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.1 usage_system cpu cpu2 +2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.2 usage_system cpu cpu0 +2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1 +2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 3.6 usage_system cpu cpu2 +2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu cpu0 +2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1 +2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 3.5 usage_system cpu 
cpu2 +2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.5 usage_system cpu cpu0 +2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.4 usage_system cpu cpu1 +2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 2.6 usage_system cpu cpu2 +2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.3 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu cpu0 +2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1 +2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.5 usage_system cpu cpu2 +2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.5 usage_system cpu cpu0 +2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1 +2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.895104895104895 usage_system cpu cpu2 +2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.3 usage_system cpu cpu0 +2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.4 usage_system cpu cpu1 +2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 6.906906906906907 usage_system cpu cpu2 +2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.3986013986013985 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ 
---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 9.2 usage_system cpu cpu0 +2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.2 usage_system cpu cpu1 +2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.7 usage_system cpu cpu2 +2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 8.4 usage_system cpu cpu0 +2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu cpu1 +2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu cpu2 +2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 8.5 usage_system cpu cpu0 +2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8991008991008991 usage_system cpu cpu1 +2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.7 usage_system cpu cpu2 +2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 8.6 usage_system cpu cpu0 +2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8008008008008008 usage_system cpu cpu1 +2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu cpu2 +2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.2 usage_system cpu cpu0 +2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.999000999000999 usage_system cpu 
cpu1 +2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.9 usage_system cpu cpu2 +2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu cpu3 + +Table: keys: [_time] + _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string +------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ---------------------- +2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.6 usage_system cpu cpu0 +2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.1022044088176353 usage_system cpu cpu1 +2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 6.4935064935064934 usage_system cpu cpu2 +2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu cpu3 +``` +{{% /truncate %}} + +Because each timestamp is structured as a separate table, when visualized, all +points that share the same timestamp appear connected. + +![Group by time](/img/flux/grouping-by-time.png) + +{{% note %}} +With some further processing, you could calculate the average CPU usage across all CPUs per point +of time and group them into a single table, but we won't cover that in this example. +If you're interested in running and visualizing this yourself, here's what the query would look like: + +```js +dataSet + |> group(columns: ["_time"]) + |> mean() + |> group(columns: ["_value", "_time"], mode: "except") +``` +{{% /note %}} + +## Group by CPU and time +Group by the `cpu` and `_time` columns. 
+ +```js +dataSet + |> group(columns: ["cpu", "_time"]) +``` + +This outputs a table for every unique `cpu` and `_time` combination: + +{{% truncate %}} +###### Group by CPU and time output tables +``` +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:00.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.892107892107892 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:00.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7992007992007992 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:00.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.1 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:00.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:10.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:10.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:10.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 3.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- 
---------------------- ------------------------------ +2018-11-05T21:34:10.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:20.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:20.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:20.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 3.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:20.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:30.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 5.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:30.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:30.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 2.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ 
+2018-11-05T21:34:30.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.3 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:40.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:40.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:40.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:40.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:50.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:50.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:50.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.895104895104895 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:34:50.000000000Z cpu3 
2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:00.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 10.3 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:00.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 1.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:00.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 6.906906906906907 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:00.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 1.3986013986013985 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:10.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 9.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:10.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 1.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:10.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:10.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.9 
usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:20.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 8.4 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:20.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:20.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:20.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:30.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 8.5 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:30.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.8991008991008991 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:30.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.7 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:30.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 
2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:40.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 8.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:40.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.8008008008008008 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:40.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:40.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:50.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 10.2 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:50.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.999000999000999 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:50.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.9 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:35:50.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: 
keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:36:00.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 10.6 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:36:00.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 1.1022044088176353 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:36:00.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 6.4935064935064934 usage_system cpu 2018-11-05T21:34:00.000000000Z + +Table: keys: [_time, cpu] + _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time +------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------ +2018-11-05T21:36:00.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z +``` +{{% /truncate %}} + +When visualized, tables appear as individual, unconnected points. + +![Group by CPU and time](/img/flux/grouping-by-cpu-time.png) + +Grouping by `cpu` and `_time` is a good illustration of how grouping works. + +## In conclusion +Grouping is a powerful way to shape your data into your desired output format. +It modifies the group keys of output tables, grouping records into tables that +all share common values within specified columns. diff --git a/content/influxdb/v2.5/query-data/flux/histograms.md b/content/influxdb/v2.5/query-data/flux/histograms.md new file mode 100644 index 000000000..a9d9a88eb --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/histograms.md @@ -0,0 +1,171 @@ +--- +title: Create histograms with Flux +list_title: Histograms +description: > + Use `histogram()` to create cumulative histograms with Flux. +influxdb/v2.5/tags: [histogram] +menu: + influxdb_2_5: + name: Histograms + parent: Query with Flux +weight: 210 +aliases: + - /influxdb/v2.5/query-data/guides/histograms/ +related: + - /{{< latest "flux" >}}/stdlib/universe/histogram + - /{{< latest "flux" >}}/prometheus/metric-types/histogram/, Work with Prometheus histograms in Flux +list_query_example: histogram +--- + +Histograms provide valuable insight into the distribution of your data. +This guide walks through using Flux's `histogram()` function to transform your data into a **cumulative histogram**. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. 
+
+## histogram() function
+
+The [`histogram()` function](/{{< latest "flux" >}}/stdlib/universe/histogram) approximates the
+cumulative distribution of a dataset by counting data frequencies for a list of "bins."
+A **bin** is simply a range in which a data point falls.
+All data points that are less than or equal to a bin's upper bound are counted in that bin.
+In the histogram output, a column is added (`le`) that represents the upper bound of each bin.
+Bin counts are cumulative.
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -5m)
+    |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+    |> histogram(bins: [0.0, 10.0, 20.0, 30.0])
+```
+
+{{% note %}}
+Values output by the `histogram` function represent points of data aggregated over time.
+Since values do not represent single points in time, there is no `_time` column in the output table.
+{{% /note %}}
+
+## Bin helper functions
+Flux provides two helper functions for generating histogram bins.
+Each generates an array of floats designed to be used in the `histogram()` function's `bins` parameter.
+
+### linearBins()
+The [`linearBins()` function](/{{< latest "flux" >}}/stdlib/universe/linearbins) generates a list of linearly separated floats.
+
+```js
+linearBins(start: 0.0, width: 10.0, count: 10)
+
+// Generated list: [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, +Inf]
+```
+
+### logarithmicBins()
+The [`logarithmicBins()` function](/{{< latest "flux" >}}/stdlib/universe/logarithmicbins) generates a list of exponentially separated floats.
+
+```js
+logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinity: true)
+
+// Generated list: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, +Inf]
+```
+
+## Histogram visualization
+The [Histogram visualization type](/influxdb/v2.5/visualize-data/visualization-types/histogram/)
+automatically converts query results into a binned and segmented histogram.
+
+{{< img-hd src="/img/influxdb/2-0-visualizations-histogram-example.png" alt="Histogram visualization" />}}
+
+Use the [Histogram visualization controls](/influxdb/v2.5/visualize-data/visualization-types/histogram/#histogram-controls)
+to specify the number of bins and define groups in bins.
+
+### Histogram visualization data structure
+Because the Histogram visualization uses visualization controls to create bins and groups,
+**do not** structure query results as histogram data.
+
+{{% note %}}
+Output of the [`histogram()` function](#histogram-function) is **not** compatible
+with the Histogram visualization type.
+View the example [below](#visualize-errors-by-severity).
+{{% /note %}} + +## Examples + +### Generate a histogram with linear bins +```js +from(bucket: "example-bucket") + |> range(start: -5m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> histogram(bins: linearBins(start: 65.5, width: 0.5, count: 20, infinity: false)) +``` + +###### Output table +``` +Table: keys: [_start, _stop, _field, _measurement, host] + _start:time _stop:time _field:string _measurement:string host:string le:float _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------ ---------------------------- ---------------------------- +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 65.5 5 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 66 6 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 66.5 8 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 67 9 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 67.5 9 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 68 10 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 68.5 12 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 69 12 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 69.5 15 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 70 23 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 70.5 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 71 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 71.5 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 72 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 72.5 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 73 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 73.5 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 74 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 74.5 30 +2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 75 30 +``` + +### Generate a histogram with logarithmic bins +```js +from(bucket: "example-bucket") + |> range(start: -5m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> histogram(bins: logarithmicBins(start: 0.5, factor: 2.0, count: 10, infinity: false)) +``` + +###### Output table +``` +Table: keys: [_start, _stop, _field, _measurement, host] + _start:time _stop:time _field:string _measurement:string host:string le:float _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------ ---------------------------- 
---------------------------- +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 0.5 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 1 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 2 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 4 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 8 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 16 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 32 0 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 64 2 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 128 30 +2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 256 30 +``` + +### Visualize errors by severity +Use the [Telegraf Syslog plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/syslog) +to collect error information from your system. +Query the `severity_code` field in the `syslog` measurement: + +```js +from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "syslog" and r._field == "severity_code") +``` + +In the Histogram visualization options, select `_time` as the **X Column** +and `severity` as the **Group By** option: + +{{< img-hd src="/img/influxdb/2-0-visualizations-histogram-errors.png" alt="Logs by severity histogram" />}} + +### Use Prometheus histograms in Flux + +_For information about working with Prometheus histograms in Flux, see +[Work with Prometheus histograms](/{{< latest "flux" >}}/prometheus/metric-types/histogram/)._ \ No newline at end of file diff --git a/content/influxdb/v2.5/query-data/flux/increase.md b/content/influxdb/v2.5/query-data/flux/increase.md new file mode 100644 index 000000000..a17b78042 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/increase.md @@ -0,0 +1,57 @@ +--- +title: Calculate the increase +seotitle: Calculate the increase in Flux +list_title: Increase +description: > + Use `increase()` to track increases across multiple columns in a table. + This function is especially useful when tracking changes in counter values that + wrap over time or periodically reset. +weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Increase +influxdb/v2.5/tags: [query, increase, counters] +related: + - /{{< latest "flux" >}}/stdlib/universe/increase/ +list_query_example: increase +--- + +Use [`increase()`](/{{< latest "flux" >}}/stdlib/universe/increase/) +to track increases across multiple columns in a table. +This function is especially useful when tracking changes in counter values that +wrap over time or periodically reset. + +```js +data + |> increase() +``` + +`increase()` returns a cumulative sum of **non-negative** differences between rows in a table. 
+For example: + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1 | +| 2020-01-01T00:02:00Z | 2 | +| 2020-01-01T00:03:00Z | 8 | +| 2020-01-01T00:04:00Z | 10 | +| 2020-01-01T00:05:00Z | 0 | +| 2020-01-01T00:06:00Z | 4 | +{{% /flex-content %}} +{{% flex-content %}} +**`increase()` returns:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:02:00Z | 1 | +| 2020-01-01T00:03:00Z | 7 | +| 2020-01-01T00:04:00Z | 9 | +| 2020-01-01T00:05:00Z | 9 | +| 2020-01-01T00:06:00Z | 13 | +{{% /flex-content %}} +{{< /flex >}} diff --git a/content/influxdb/v2.5/query-data/flux/join.md b/content/influxdb/v2.5/query-data/flux/join.md new file mode 100644 index 000000000..4cfaf9048 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/join.md @@ -0,0 +1,401 @@ +--- +title: Join data with Flux +seotitle: Join data in InfluxDB with Flux +list_title: Join +description: This guide walks through joining data with Flux and outlines how it shapes your data in the process. +influxdb/v2.5/tags: [join, flux] +menu: + influxdb_2_5: + name: Join + parent: Query with Flux +weight: 210 +aliases: + - /influxdb/v2.5/query-data/guides/join/ +related: + - /{{< latest "flux" >}}/join-data/ + - /{{< latest "flux" >}}/join-data/inner/ + - /{{< latest "flux" >}}/join-data/left-outer/ + - /{{< latest "flux" >}}/join-data/right-outer/ + - /{{< latest "flux" >}}/join-data/full-outer/ + - /{{< latest "flux" >}}/join-data/time/ + - /{{< latest "flux" >}}/stdlib/join/ +list_query_example: join-new +--- + +Use the Flux [`join` package](/{{< latest "flux" >}}/stdlib/join/) to join two data sets +based on common values using the following join methods: + +{{< flex >}} +{{< flex-content "quarter" >}} +

**Inner join**

+ {{< svg svg="static/svgs/join-diagram.svg" class="inner small center" >}} +{{< /flex-content >}} +{{< flex-content "quarter" >}} +

**Left outer join**

+ {{< svg svg="static/svgs/join-diagram.svg" class="left small center" >}} +{{< /flex-content >}} +{{< flex-content "quarter" >}} +

**Right outer join**

+ {{< svg svg="static/svgs/join-diagram.svg" class="right small center" >}} +{{< /flex-content >}} +{{< flex-content "quarter" >}} +

**Full outer join**

+ {{< svg svg="static/svgs/join-diagram.svg" class="full small center" >}} +{{< /flex-content >}} +{{< /flex >}} + +The join package lets you join data from different data sources such as +[InfluxDB](/{{< latest "flux" >}}/query-data/influxdb/), [SQL database](/{{< latest "flux" >}}/query-data/sql/), +[CSV](/{{< latest "flux" >}}/query-data/csv/), and [others](/{{< latest "flux" >}}/query-data/). + +## Use join functions to join your data + +{{< tabs-wrapper >}} +{{% tabs %}} +[Inner join](#) +[Left join](#) +[Right join](#) +[Full outer join](#) +[Join on time](#) +{{% /tabs %}} + + +{{% tab-content %}} + +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/{{< latest "flux" >}}/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/{{< latest "flux" >}}/join-data/#data-requirements)._ + +3. Use [`join.inner()`](/{{< latest "flux" >}}/stdlib/join/inner/) to join the two streams together. + Provide the following required parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/{{< latest "flux" >}}/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. + - `as`: [Join output function](/{{< latest "flux" >}}/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({l with column1: r.column1, column2: r.column2})`. + +```js +import "join" +import "sql" + +left = + from(bucket: "example-bucket-1") + |> range(start: "-1h") + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "example-field") + +right = + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://username:password@localhost:5432", + query: "SELECT * FROM example_table", + ) + +join.inner( + left: left, + right: right, + on: (l, r) => l.column == r.column, + as: (l, r) => ({l with name: r.name, location: r.location}), +) +``` + +For more information and detailed examples, see [Perform an inner join](/{{< latest "flux" >}}/join-data/inner/) +in the Flux documentation. + +{{% /tab-content %}} + + + +{{% tab-content %}} + +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/{{< latest "flux" >}}/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/{{< latest "flux" >}}/join-data/#data-requirements)._ + +3. Use [`join.left()`](/{{< latest "flux" >}}/stdlib/join/left/) to join the two streams together. + Provide the following required parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/{{< latest "flux" >}}/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. + - `as`: [Join output function](/{{< latest "flux" >}}/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({l with column1: r.column1, column2: r.column2})`. 
+ +```js +import "join" +import "sql" + +left = + from(bucket: "example-bucket-1") + |> range(start: "-1h") + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "example-field") + +right = + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://username:password@localhost:5432", + query: "SELECT * FROM example_table", + ) + +join.left( + left: left, + right: right, + on: (l, r) => l.column == r.column, + as: (l, r) => ({l with name: r.name, location: r.location}), +) +``` + +For more information and detailed examples, see [Perform a left outer join](/{{< latest "flux" >}}/join-data/left-outer/) +in the Flux documentation. + +{{% /tab-content %}} + + + +{{% tab-content %}} + +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/{{< latest "flux" >}}/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/{{< latest "flux" >}}/join-data/#data-requirements)._ + +3. Use [`join.right()`](/{{< latest "flux" >}}/stdlib/join/right/) to join the two streams together. + Provide the following required parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/{{< latest "flux" >}}/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. + - `as`: [Join output function](/{{< latest "flux" >}}/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({l with column1: r.column1, column2: r.column2})`. + +```js +import "join" +import "sql" + +left = + from(bucket: "example-bucket-1") + |> range(start: "-1h") + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "example-field") + +right = + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://username:password@localhost:5432", + query: "SELECT * FROM example_table", + ) + +join.right( + left: left, + right: right, + on: (l, r) => l.column == r.column, + as: (l, r) => ({l with name: r.name, location: r.location}), +) +``` + +For more information and detailed examples, see [Perform a right outer join](/{{< latest "flux" >}}/join-data/right-outer/) +in the Flux documentation. + +{{% /tab-content %}} + + + +{{% tab-content %}} +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/{{< latest "flux" >}}/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/{{< latest "flux" >}}/join-data/#data-requirements)._ + +3. Use [`join.full()`](/{{< latest "flux" >}}/stdlib/join/full/) to join the two streams together. + Provide the following required parameters: + + - `left`: Stream of data representing the left side of the join. + - `right`: Stream of data representing the right side of the join. + - `on`: [Join predicate](/{{< latest "flux" >}}/join-data/#join-predicate-function-on). + For example: `(l, r) => l.column == r.column`. 
+ - `as`: [Join output function](/{{< latest "flux" >}}/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({l with column1: r.column1, column2: r.column2})`. + +{{% note %}} +Full outer joins must account for non-group-key columns in both `l` and `r` +records being null. Use conditional logic to check which record contains non-null +values for columns not in the group key. +For more information, see [Account for missing, non-group-key values](/{{< latest "flux" >}}/join-data/full-outer/#account-for-missing-non-group-key-values). +{{% /note %}} + +```js +import "join" +import "sql" + +left = + from(bucket: "example-bucket-1") + |> range(start: "-1h") + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "example-field") + +right = + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://username:password@localhost:5432", + query: "SELECT * FROM example_table", + ) + +join.full( + left: left, + right: right, + on: (l, r) => l.id== r.id, + as: (l, r) => { + id = if exists l.id then l.id else r.id + + return {name: l.name, location: r.location, id: id} + }, +) +``` + +For more information and detailed examples, see [Perform a full outer join](/{{< latest "flux" >}}/join-data/full-outer/) +in the Flux documentation. + +{{% /tab-content %}} + + + +{{% tab-content %}} + +1. Import the `join` package. +2. Define the **left** and **right** data streams to join: + + - Each stream must also have a `_time` column. + - Each stream must have one or more columns with common values. + Column labels do not need to match, but column values do. + - Each stream should have identical [group keys](/{{< latest "flux" >}}/get-started/data-model/#group-key). + + _For more information, see [join data requirements](/{{< latest "flux" >}}/join-data/#data-requirements)._ + +3. Use [`join.time()`](/{{< latest "flux" >}}/stdlib/join/time/) to join the two streams + together based on time values. + Provide the following parameters: + + - `left`: ({{< req >}}) Stream of data representing the left side of the join. + - `right`: ({{< req >}}) Stream of data representing the right side of the join. + - `as`: ({{< req >}}) [Join output function](/{{< latest "flux" >}}/join-data/#join-output-function-as) + that returns a record with values from each input stream. + For example: `(l, r) => ({r with column1: l.column1, column2: l.column2})`. + - `method`: Join method to use. Default is `inner`. + +```js +import "join" +import "sql" + +left = + from(bucket: "example-bucket-1") + |> range(start: "-1h") + |> filter(fn: (r) => r._measurement == "example-m1") + |> filter(fn: (r) => r._field == "example-f1") + +right = + from(bucket: "example-bucket-2") + |> range(start: "-1h") + |> filter(fn: (r) => r._measurement == "example-m2") + |> filter(fn: (r) => r._field == "example-f2") + +join.time(method: "left", left: left, right: right, as: (l, r) => ({l with f2: r._value})) +``` + +For more information and detailed examples, see [Join on time](/{{< latest "flux" >}}/join-data/time/) +in the Flux documentation. + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +--- + +## When to use union and pivot instead of join functions + +We recommend using the `join` package to join streams that have mostly different +schemas or that come from two separate data sources. 
+If you're joining two datasets queried from InfluxDB, using +[`union()`](/{{< latest "flux" >}}/stdlib/universe/union/) and [`pivot()`](/{{< latest "flux" >}}/stdlib/universe/pivot/) +to combine the data will likely be more performant. + +For example, if you need to query fields from different InfluxDB buckets and align +field values in each row based on time: + +```js +f1 = + from(bucket: "example-bucket-1") + |> range(start: "-1h") + |> filter(fn: (r) => r._field == "f1") + |> drop(columns: "_measurement") + +f2 = + from(bucket: "example-bucket-2") + |> range(start: "-1h") + |> filter(fn: (r) => r._field == "f2") + |> drop(columns: "_measurement") + +union(tables: [f1, f2]) + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") +``` +{{< expand-wrapper >}} +{{% expand "View example input and output data" %}} + +#### Input +{{< flex >}} +{{% flex-content %}} +##### f1 +| _time | _field | _value | +| :------------------- | :----- | -----: | +| 2020-01-01T00:01:00Z | f1 | 1 | +| 2020-01-01T00:02:00Z | f1 | 2 | +| 2020-01-01T00:03:00Z | f1 | 1 | +| 2020-01-01T00:04:00Z | f1 | 3 | +{{% /flex-content %}} +{{% flex-content %}} +##### f2 +| _time | _field | _value | +| :------------------- | :----- | -----: | +| 2020-01-01T00:01:00Z | f2 | 5 | +| 2020-01-01T00:02:00Z | f2 | 12 | +| 2020-01-01T00:03:00Z | f2 | 8 | +| 2020-01-01T00:04:00Z | f2 | 6 | +{{% /flex-content %}} +{{< /flex >}} + +#### Output +| _time | f1 | f2 | +| :------------------- | --: | --: | +| 2020-01-01T00:01:00Z | 1 | 5 | +| 2020-01-01T00:02:00Z | 2 | 12 | +| 2020-01-01T00:03:00Z | 1 | 8 | +| 2020-01-01T00:04:00Z | 3 | 6 | + +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/influxdb/v2.5/query-data/flux/mathematic-operations.md b/content/influxdb/v2.5/query-data/flux/mathematic-operations.md new file mode 100644 index 000000000..dc3bcd1ef --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/mathematic-operations.md @@ -0,0 +1,208 @@ +--- +title: Transform data with mathematic operations +seotitle: Transform data with mathematic operations in Flux +list_title: Transform data with math +description: > + Use `map()` to remap column values and apply mathematic operations. +influxdb/v2.5/tags: [math, flux] +menu: + influxdb_2_5: + name: Transform data with math + parent: Query with Flux +weight: 208 +aliases: + - /influxdb/v2.5/query-data/guides/mathematic-operations/ +related: + - /{{< latest "flux" >}}/stdlib/universe/map + - /{{< latest "flux" >}}/stdlib/universe/aggregates/reduce/ + - /{{< latest "flux" >}}/language/operators/ + - /{{< latest "flux" >}}/function-types/#type-conversions, Flux type-conversion functions + - /influxdb/v2.5/query-data/flux/calculate-percentages/ +list_query_example: map_math +--- + +[Flux](/{{< latest "flux" >}}/), InfluxData's data scripting and query language, +supports mathematic expressions in data transformations. +This article describes how to use [Flux arithmetic operators](/{{< latest "flux" >}}/spec/operators/#arithmetic-operators) +to "map" over data and transform values using mathematic operations. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. 
+ +##### Basic mathematic operations +```js +// Examples executed using the Flux REPL +> 9 + 9 +18 +> 22 - 14 +8 +> 6 * 5 +30 +> 21 / 7 +3 +``` + +

_See the Flux Read-Eval-Print Loop (REPL)._

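Outside the REPL, the same arithmetic operators can be used anywhere Flux expects an expression, for example when assigning variables in a script. A trivial sketch (the variable names are arbitrary):

```js
// Arithmetic in a Flux script rather than the REPL
total = 9 + 9       // 18 (integer arithmetic)
ratio = 21.0 / 7.0  // 3.0 (float arithmetic)
```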
+ +{{% note %}} +#### Operands must be the same type +Operands in Flux mathematic operations must be the same data type. +For example, integers cannot be used in operations with floats. +Otherwise, you will get an error similar to: + +``` +Error: type error: float != int +``` + +To convert operands to the same type, use [type-conversion functions](/{{< latest "flux" >}}/stdlib/universe/) +or manually format operands. +The operand data type determines the output data type. +For example: + +```js +100 // Parsed as an integer +100.0 // Parsed as a float + +// Example evaluations +> 20 / 8 +2 + +> 20.0 / 8.0 +2.5 +``` +{{% /note %}} + +## Custom mathematic functions +Flux lets you [create custom functions](/influxdb/v2.5/query-data/flux/custom-functions) that use mathematic operations. +View the examples below. + +###### Custom multiplication function +```js +multiply = (x, y) => x * y + +multiply(x: 10, y: 12) +// Returns 120 +``` + +###### Custom percentage function +```js +percent = (sample, total) => (sample / total) * 100.0 + +percent(sample: 20.0, total: 80.0) +// Returns 25.0 +``` + +### Transform values in a data stream +To transform multiple values in an input stream, your function needs to: + +- [Handle piped-forward data](/influxdb/v2.5/query-data/flux/custom-functions/#use-piped-forward-data-in-a-custom-function). +- Each operand necessary for the calculation exists in each row _(see [Pivot vs join](#pivot-vs-join) below)_. +- Use the [`map()` function](/{{< latest "flux" >}}/stdlib/universe/map) to iterate over each row. + +The example `multiplyByX()` function below includes: + +- A `tables` parameter that represents the input data stream (`<-`). +- An `x` parameter which is the number by which values in the `_value` column are multiplied. +- A `map()` function that iterates over each row in the input stream. + It uses the `with` operator to preserve existing columns in each row. + It also multiples the `_value` column by `x`. + +```js +multiplyByX = (x, tables=<-) => tables + |> map(fn: (r) => ({r with _value: r._value * x})) + +data + |> multiplyByX(x: 10) +``` + +## Examples + +### Convert bytes to gigabytes +To convert active memory from bytes to gigabytes (GB), divide the `active` field +in the `mem` measurement by 1,073,741,824. + +The `map()` function iterates over each row in the piped-forward data and defines +a new `_value` by dividing the original `_value` by 1073741824. + +```js +from(bucket: "example-bucket") + |> range(start: -10m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "active") + |> map(fn: (r) => ({r with _value: r._value / 1073741824})) +``` + +You could turn that same calculation into a function: + +```js +bytesToGB = (tables=<-) => tables + |> map(fn: (r) => ({r with _value: r._value / 1073741824})) + +data + |> bytesToGB() +``` + +#### Include partial gigabytes +Because the original metric (bytes) is an integer, the output of the operation is an integer and does not include partial GBs. +To calculate partial GBs, convert the `_value` column and its values to floats using the +[`float()` function](/{{< latest "flux" >}}/stdlib/universe/float) +and format the denominator in the division operation as a float. + +```js +bytesToGB = (tables=<-) => tables + |> map(fn: (r) => ({r with _value: float(v: r._value) / 1073741824.0})) +``` + +### Calculate a percentage +To calculate a percentage, use simple division, then multiply the result by 100. 
+ +```js +> 1.0 / 4.0 * 100.0 +25.0 +``` + +_For an in-depth look at calculating percentages, see [Calculate percentages](/influxdb/v2.5/query-data/flux/calculate-percentages)._ + +## Pivot vs join +To query and use values in mathematical operations in Flux, operand values must +exists in a single row. +Both `pivot()` and `join()` will do this, but there are important differences between the two: + +#### Pivot is more performant +`pivot()` reads and operates on a single stream of data. +`join()` requires two streams of data and the overhead of reading and combining +both streams can be significant, especially for larger data sets. + +#### Use join for multiple data sources +Use `join()` when querying data from different buckets or data sources. + +##### Pivot fields into columns for mathematic calculations +```js +data + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") + |> map(fn: (r) => ({r with _value: (r.field1 + r.field2) / r.field3 * 100.0})) +``` + +##### Join multiple data sources for mathematic calculations +```js +import "sql" +import "influxdata/influxdb/secrets" + +pgUser = secrets.get(key: "POSTGRES_USER") +pgPass = secrets.get(key: "POSTGRES_PASSWORD") +pgHost = secrets.get(key: "POSTGRES_HOST") + +t1 = sql.from( + driverName: "postgres", + dataSourceName: "postgresql://${pgUser}:${pgPass}@${pgHost}", + query: "SELECT id, name, available FROM example_table", +) + +t2 = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement" and r._field == "example-field") + +join(tables: {t1: t1, t2: t2}, on: ["id"]) + |> map(fn: (r) => ({r with _value: r._value_t2 / r.available_t1 * 100.0})) +``` diff --git a/content/influxdb/v2.5/query-data/flux/median.md b/content/influxdb/v2.5/query-data/flux/median.md new file mode 100644 index 000000000..b8e28d601 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/median.md @@ -0,0 +1,149 @@ +--- +title: Find median values +seotitle: Find median values in Flux +list_title: Median +description: > + Use `median()` to return a value representing the `0.5` quantile (50th percentile) or median of input data. +weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Median +influxdb/v2.5/tags: [query, median] +related: + - /influxdb/v2.5/query-data/flux/percentile-quantile/ + - /{{< latest "flux" >}}/stdlib/universe/median/ + - /{{< latest "flux" >}}/stdlib/universe/quantile/ +list_query_example: median +--- + +Use the [`median()` function](/{{< latest "flux" >}}/stdlib/universe/median/) +to return a value representing the `0.5` quantile (50th percentile) or median of input data. + +## Select a method for calculating the median +Select one of the following methods to calculate the median: + +- [estimate_tdigest](#estimate_tdigest) +- [exact_mean](#exact_mean) +- [exact_selector](#exact_selector) + +### estimate_tdigest +**(Default)** An aggregate method that uses a [t-digest data structure](https://github.com/tdunning/t-digest) +to compute an accurate `0.5` quantile estimate on large data sources. +Output tables consist of a single row containing the calculated median. 
+ +{{< flex >}} +{{% flex-content %}} +**Given the following input table:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**`estimate_tdigest` returns:** + +| _value | +|:------:| +| 1.5 | +{{% /flex-content %}} +{{< /flex >}} + +### exact_mean +An aggregate method that takes the average of the two points closest to the `0.5` quantile value. +Output tables consist of a single row containing the calculated median. + +{{< flex >}} +{{% flex-content %}} +**Given the following input table:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**`exact_mean` returns:** + +| _value | +|:------:| +| 1.5 | +{{% /flex-content %}} +{{< /flex >}} + +### exact_selector +A selector method that returns the data point for which at least 50% of points are less than. +Output tables consist of a single row containing the calculated median. + +{{< flex >}} +{{% flex-content %}} +**Given the following input table:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**`exact_selector` returns:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:02:00Z | 1.0 | +{{% /flex-content %}} +{{< /flex >}} + +{{% note %}} +The examples below use the [example data variable](/influxdb/v2.5/query-data/flux/#example-data-variable). +{{% /note %}} + +## Find the value that represents the median +Use the default method, `"estimate_tdigest"`, to return all rows in a table that +contain values in the 50th percentile of data in the table. + +```js +data + |> median() +``` + +## Find the average of values closest to the median +Use the `exact_mean` method to return a single row per input table containing the +average of the two values closest to the mathematical median of data in the table. + +```js +data + |> median(method: "exact_mean") +``` + +## Find the point with the median value +Use the `exact_selector` method to return a single row per input table containing the +value that 50% of values in the table are less than. + +```js +data + |> median(method: "exact_selector") +``` + +## Use median() with aggregateWindow() +[`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +segments data into windows of time, aggregates data in each window into a single +point, and then removes the time-based segmentation. +It is primarily used to [downsample data](/influxdb/v2.5/process-data/common-tasks/downsample-data/). 
+ +To specify the [median calculation method](#select-a-method-for-calculating-the-median) in `aggregateWindow()`, use the +[full function syntax](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/#specify-parameters-of-the-aggregate-function): + +```js +data + |> aggregateWindow( + every: 5m, + fn: (tables=<-, column) => tables |> median(method: "exact_selector"), + ) +``` diff --git a/content/influxdb/v2.5/query-data/flux/monitor-states.md b/content/influxdb/v2.5/query-data/flux/monitor-states.md new file mode 100644 index 000000000..8d358917b --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/monitor-states.md @@ -0,0 +1,189 @@ +--- +title: Monitor states +seotitle: Monitor states and state changes in your events and metrics with Flux. +description: Flux provides several functions to help monitor states and state changes in your data. +influxdb/v2.5/tags: [states, monitor, flux] +menu: + influxdb_2_5: + name: Monitor states + parent: Query with Flux +weight: 220 +aliases: + - /influxdb/v2.5/query-data/guides/monitor-states/ +related: + - /{{< latest "flux" >}}/stdlib/universe/stateduration/ + - /{{< latest "flux" >}}/stdlib/universe/statecount/ +--- + +Flux helps you monitor states in your metrics and events: + +- [Find how long a state persists](#find-how-long-a-state-persists) +- [Count the number of consecutive states](#count-the-number-of-consecutive-states) + + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +## Find how long a state persists + +Use [`stateDuration()`](/{{< latest "flux" >}}/stdlib/universe/stateduration/) +to calculate the duration of consecutive rows with a specified state. +For each consecutive point that matches the specified state, `stateDuration()` +increments and stores the duration (in the specified unit) in a user-defined column. + +Include the following information: + +- **Column to search:** any tag key, tag value, field key, field value, or measurement. +- **Value:** the value (or state) to search for in the specified column. +- **State duration column:** a new column to store the state duration─the length of time that the specified value persists. +- **Unit:** the unit of time (`1s` (by default), `1m`, `1h`) used to increment the state duration. + +```js +data + |> stateDuration(fn: (r) => r.column_to_search == "value_to_search_for", column: "state_duration", unit: 1s) +``` + +- For the first point that evaluates `true`, the state duration is set to `0`. + For each consecutive point that evaluates `true`, the state duration + increases by the time interval between each consecutive point (in specified units). +- If the state is `false`, the state duration is reset to `-1`. + +### Example query with stateDuration() + +The following query searches the `doors` bucket over the past 5 minutes to find how many seconds a door has been `closed`. + +```js +from(bucket: "doors") + |> range(start: -5m) + |> stateDuration(fn: (r) => r._value == "closed", column: "door_closed", unit: 1s) +``` + +In this example, `door_closed` is the **State duration** column. +If you write data to the `doors` bucket every minute, the state duration +increases by `60s` for each consecutive point where `_value` is `closed`. +If `_value` is not `closed`, the state duration is reset to `0`. 
+ +#### Query results + +Results for the example query above may look like this (for simplicity, we've omitted the measurement, tag, and field columns): + +| _time | _value | door_closed | +| :------------------- | :----: | ----------: | +| 2019-10-26T17:39:16Z | closed | 0 | +| 2019-10-26T17:40:16Z | closed | 60 | +| 2019-10-26T17:41:16Z | closed | 120 | +| 2019-10-26T17:42:16Z | open | -1 | +| 2019-10-26T17:43:16Z | closed | 0 | +| 2019-10-26T17:44:27Z | closed | 60 | + +## Count the number of consecutive states + +Use the [`stateCount()` function](/{{< latest "flux" >}}/stdlib/universe/statecount/) +and include the following information: + +- **Column to search:** any tag key, tag value, field key, field value, or measurement. +- **Value:** to search for in the specified column. +- **State count column:** a new column to store the state count─the number of + consecutive records in which the specified value exists. + +```js +|> stateCount( + fn: (r) => r.column_to_search == "value_to_search_for", + column: "state_count", +) +``` + +- For the first point that evaluates `true`, the state count is set to `1`. For each consecutive point that evaluates `true`, the state count increases by 1. +- If the state is `false`, the state count is reset to `-1`. + +### Example query with stateCount() + +The following query searches the `doors` bucket over the past 5 minutes and calculates how many points have `closed` as their `_value`. + +```js +from(bucket: "doors") + |> range(start: -5m) + |> stateCount(fn: (r) => r._value == "closed", column: "door_closed") +``` + +This example stores the **state count** in the `door_closed` column. If you write data to the `doors` bucket every minute, the state count increases by `1` for each consecutive point where `_value` is `closed`. If `_value` is not `closed`, the state count is reset to `-1`. + +#### Query results + +Results for the example query above may look like this (for simplicity, we've omitted the measurement, tag, and field columns): + +| _time | _value | door_closed | +| :------------------- | :----: | ----------: | +| 2019-10-26T17:39:16Z | closed | 1 | +| 2019-10-26T17:40:16Z | closed | 2 | +| 2019-10-26T17:41:16Z | closed | 3 | +| 2019-10-26T17:42:16Z | open | -1 | +| 2019-10-26T17:43:16Z | closed | 1 | +| 2019-10-26T17:44:27Z | closed | 2 | + +#### Example query to count machine state + +The following query checks the machine state every minute (idle, assigned, or busy). InfluxDB searches the `servers` bucket over the past hour and counts records with a machine state of `idle`, `assigned` or `busy`. + +```js +from(bucket: "servers") + |> range(start: -1h) + |> filter(fn: (r) => r.machine_state == "idle" or r.machine_state == "assigned" or r.machine_state == "busy") + |> stateCount(fn: (r) => r.machine_state == "busy", column: "_count") + |> stateCount(fn: (r) => r.machine_state == "assigned", column: "_count") + |> stateCount(fn: (r) => r.machine_state == "idle", column: "_count") +``` + + + + diff --git a/content/influxdb/v2.5/query-data/flux/moving-average.md b/content/influxdb/v2.5/query-data/flux/moving-average.md new file mode 100644 index 000000000..91e63a39b --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/moving-average.md @@ -0,0 +1,118 @@ +--- +title: Calculate the moving average +seotitle: Calculate the moving average in Flux +list_title: Moving Average +description: > + Use `movingAverage()` or `timedMovingAverage()` to return the moving average of data. 
+weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Moving Average +influxdb/v2.5/tags: [query, moving average] +related: + - /{{< latest "flux" >}}/stdlib/universe/movingaverage/ + - /{{< latest "flux" >}}/stdlib/universe/timedmovingaverage/ +list_query_example: moving_average +--- + +Use [`movingAverage()`](/{{< latest "flux" >}}/stdlib/universe/movingaverage/) +or [`timedMovingAverage()`](/{{< latest "flux" >}}/stdlib/universe/timedmovingaverage/) +to return the moving average of data. + +```js +data + |> movingAverage(n: 5) + +// OR + +data + |> timedMovingAverage(every: 5m, period: 10m) +``` + +### movingAverage() +For each row in a table, `movingAverage()` returns the average of the current value and +**previous** values where `n` is the total number of values used to calculate the average. + +If `n = 3`: + +| Row # | Calculation | +|:-----:|:----------- | +| 1 | _Insufficient number of rows_ | +| 2 | _Insufficient number of rows_ | +| 3 | (Row1 + Row2 + Row3) / 3 | +| 4 | (Row2 + Row3 + Row4) / 3 | +| 5 | (Row3 + Row4 + Row5) / 3 | + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.2 | +| 2020-01-01T00:03:00Z | 1.8 | +| 2020-01-01T00:04:00Z | 0.9 | +| 2020-01-01T00:05:00Z | 1.4 | +| 2020-01-01T00:06:00Z | 2.0 | +{{% /flex-content %}} +{{% flex-content %}} +**The following would return:** + +```js +|> movingAverage(n: 3) +``` + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:03:00Z | 1.33 | +| 2020-01-01T00:04:00Z | 1.30 | +| 2020-01-01T00:05:00Z | 1.36 | +| 2020-01-01T00:06:00Z | 1.43 | +{{% /flex-content %}} +{{< /flex >}} + +### timedMovingAverage() +For each row in a table, `timedMovingAverage()` returns the average of the +current value and all row values in the **previous** `period` (duration). +It returns moving averages at a frequency defined by the `every` parameter. + +Each color in the diagram below represents a period of time used to calculate an +average and the time a point representing the average is returned. +If `every = 30m` and `period = 1h`: + +{{< svg "/static/svgs/timed-moving-avg.svg" >}} + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:00Z | 1.0 | +| 2020-01-01T00:30:00Z | 1.2 | +| 2020-01-01T01:00:00Z | 1.8 | +| 2020-01-01T01:30:00Z | 0.9 | +| 2020-01-01T02:00:00Z | 1.4 | +| 2020-01-01T02:30:00Z | 2.0 | +| 2020-01-01T03:00:00Z | 1.9 | +{{% /flex-content %}} +{{% flex-content %}} +**The following would return:** + +```js +|> timedMovingAverage(every: 30m, period: 1h) +``` + +| _time | _value | +| :------------------- | -----: | +| 2020-01-01T00:30:00Z | 1.0 | +| 2020-01-01T01:00:00Z | 1.1 | +| 2020-01-01T01:30:00Z | 1.5 | +| 2020-01-01T02:00:00Z | 1.35 | +| 2020-01-01T02:30:00Z | 1.15 | +| 2020-01-01T03:00:00Z | 1.7 | +| 2020-01-01T03:00:00Z | 2 | +{{% /flex-content %}} +{{< /flex >}} diff --git a/content/influxdb/v2.5/query-data/flux/operate-on-timestamps.md b/content/influxdb/v2.5/query-data/flux/operate-on-timestamps.md new file mode 100644 index 000000000..e3e8b8acb --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/operate-on-timestamps.md @@ -0,0 +1,203 @@ +--- +title: Operate on timestamps with Flux +list_title: Operate on timestamps +description: > + Use Flux to process and operate on timestamps. 
+menu: + influxdb_2_5: + name: Operate on timestamps + parent: Query with Flux +weight: 220 +aliases: + - /influxdb/v2.5/query-data/guides/manipulate-timestamps/ + - /influxdb/v2.5/query-data/flux/manipulate-timestamps/ +related: + - /{{< latest "flux" >}}/stdlib/universe/now/ + - /{{< latest "flux" >}}/stdlib/system/time/ + - /{{< latest "flux" >}}/stdlib/universe/time/ + - /{{< latest "flux" >}}/stdlib/universe/uint/ + - /{{< latest "flux" >}}/stdlib/universe/int/ + - /{{< latest "flux" >}}/stdlib/universe/truncatetimecolumn/ + - /{{< latest "flux" >}}/stdlib/date/truncate/ + - /{{< latest "flux" >}}/stdlib/date/add/ + - /{{< latest "flux" >}}/stdlib/date/sub/ +--- + +Every point stored in InfluxDB has an associated timestamp. +Use Flux to process and operate on timestamps to suit your needs. + +- [Convert timestamp format](#convert-timestamp-format) +- [Calculate the duration between two timestamps](#calculate-the-duration-between-two-timestamps) +- [Retrieve the current time](#retrieve-the-current-time) +- [Normalize irregular timestamps](#normalize-irregular-timestamps) +- [Use timestamps and durations together](#use-timestamps-and-durations-together) + +{{% note %}} +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. +{{% /note %}} + + +## Convert timestamp format + +- [Unix nanosecond to RFC3339](#unix-nanosecond-to-rfc3339) +- [RFC3339 to Unix nanosecond](#rfc3339-to-unix-nanosecond) + +### Unix nanosecond to RFC3339 +Use the [`time()` function](/{{< latest "flux" >}}/stdlib/universe/time/) +to convert a [Unix **nanosecond** timestamp](/influxdb/v2.5/reference/glossary/#unix-timestamp) +to an [RFC3339 timestamp](/influxdb/v2.5/reference/glossary/#rfc3339-timestamp). + +```js +time(v: 1568808000000000000) +// Returns 2019-09-18T12:00:00.000000000Z +``` + +### RFC3339 to Unix nanosecond +Use the [`uint()` function](/{{< latest "flux" >}}/stdlib/universe/uint/) +to convert an RFC3339 timestamp to a Unix nanosecond timestamp. + +```js +uint(v: 2019-09-18T12:00:00.000000000Z) +// Returns 1568808000000000000 +``` + +## Calculate the duration between two timestamps +Flux doesn't support mathematical operations using [time type](/{{< latest "flux" >}}/spec/types/#time-types) values. +To calculate the duration between two timestamps: + +1. Use the `uint()` function to convert each timestamp to a Unix nanosecond timestamp. +2. Subtract one Unix nanosecond timestamp from the other. +3. Use the `duration()` function to convert the result into a duration. + +```js +time1 = uint(v: 2019-09-17T21:12:05Z) +time2 = uint(v: 2019-09-18T22:16:35Z) + +duration(v: time2 - time1) +// Returns 25h4m30s +``` + +{{% note %}} +Flux doesn't support duration column types. +To store a duration in a column, use the [`string()` function](/{{< latest "flux" >}}/stdlib/universe/string/) +to convert the duration to a string. +{{% /note %}} + +## Retrieve the current time +- [Current UTC time](#current-utc-time) +- [Current system time](#current-system-time) + +### Current UTC time +Use the [`now()` function](/{{< latest "flux" >}}/stdlib/universe/now/) to +return the current UTC time in RFC3339 format. + +```js +now() +``` + +{{% note %}} +`now()` is cached at runtime, so all instances of `now()` in a Flux script +return the same value. 
+{{% /note %}} + +### Current system time +Import the `system` package and use the [`system.time()` function](/{{< latest "flux" >}}/stdlib/system/time/) +to return the current system time of the host machine in RFC3339 format. + +```js +import "system" + +system.time() +``` + +{{% note %}} +`system.time()` returns the time it is executed, so each instance of `system.time()` +in a Flux script returns a unique value. +{{% /note %}} + +## Normalize irregular timestamps +To normalize irregular timestamps, truncate all `_time` values to a specified unit +with the [`truncateTimeColumn()` function](/{{< latest "flux" >}}/stdlib/universe/truncatetimecolumn/). +This is useful in [`join()`](/{{< latest "flux" >}}/stdlib/universe/join/) +and [`pivot()`](/{{< latest "flux" >}}/stdlib/universe/pivot/) +operations where points should align by time, but timestamps vary slightly. + +```js +data + |> truncateTimeColumn(unit: 1m) +``` + +{{< flex >}} +{{% flex-content %}} +**Input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:49Z | 2.0 | +| 2020-01-01T00:01:01Z | 1.9 | +| 2020-01-01T00:03:22Z | 1.8 | +| 2020-01-01T00:04:04Z | 1.9 | +| 2020-01-01T00:05:38Z | 2.1 | +{{% /flex-content %}} +{{% flex-content %}} +**Output:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:00Z | 2.0 | +| 2020-01-01T00:01:00Z | 1.9 | +| 2020-01-01T00:03:00Z | 1.8 | +| 2020-01-01T00:04:00Z | 1.9 | +| 2020-01-01T00:05:00Z | 2.1 | +{{% /flex-content %}} +{{< /flex >}} + +## Use timestamps and durations together +- [Add a duration to a timestamp](#add-a-duration-to-a-timestamp) +- [Subtract a duration from a timestamp](#subtract-a-duration-from-a-timestamp) + +### Add a duration to a timestamp +[`date.add()`](/{{< latest "flux" >}}/stdlib/date/add/) +adds a duration to a specified time and returns the resulting time. + +```js +import "date" + +date.add(d: 6h, to: 2019-09-16T12:00:00Z) + +// Returns 2019-09-16T18:00:00.000000000Z +``` + +### Subtract a duration from a timestamp +[`date.sub()`](/{{< latest "flux" >}}/stdlib/date/sub/) +subtracts a duration from a specified time and returns the resulting time. + +```js +import "date" + +date.sub(d: 6h, from: 2019-09-16T12:00:00Z) + +// Returns 2019-09-16T06:00:00.000000000Z +``` + +### Shift a timestamp forward or backward + +The [timeShift()](/{{< latest "flux" >}}/stdlib/universe/timeshift/) function adds the specified duration of time to each value in time columns (`_start`, `_stop`, `_time`). + +Shift forward in time: + +```js +from(bucket: "example-bucket") + |> range(start: -5m) + |> timeShift(duration: 12h) +``` +Shift backward in time: + +```js +from(bucket: "example-bucket") + |> range(start: -5m) + |> timeShift(duration: -12h) +``` diff --git a/content/influxdb/v2.5/query-data/flux/percentile-quantile.md b/content/influxdb/v2.5/query-data/flux/percentile-quantile.md new file mode 100644 index 000000000..511bcc7c7 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/percentile-quantile.md @@ -0,0 +1,164 @@ +--- +title: Find percentile and quantile values +seotitle: Query percentile and quantile values in Flux +list_title: Percentile & quantile +description: > + Use the `quantile()` function to return all values within the `q` quantile or + percentile of input data. 
+weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Percentile & quantile +influxdb/v2.5/tags: [query, percentile, quantile] +related: + - /influxdb/v2.5/query-data/flux/query-median/ + - /{{< latest "flux" >}}/stdlib/universe/quantile/ +list_query_example: quantile +--- + +Use the [`quantile()` function](/{{< latest "flux" >}}/stdlib/universe/quantile/) +to return a value representing the `q` quantile or percentile of input data. + +## Percentile versus quantile +Percentiles and quantiles are very similar, differing only in the number used to calculate return values. +A percentile is calculated using numbers between `0` and `100`. +A quantile is calculated using numbers between `0.0` and `1.0`. +For example, the **`0.5` quantile** is the same as the **50th percentile**. + +## Select a method for calculating the quantile +Select one of the following methods to calculate the quantile: + +- [estimate_tdigest](#estimate_tdigest) +- [exact_mean](#exact_mean) +- [exact_selector](#exact_selector) + +### estimate_tdigest +**(Default)** An aggregate method that uses a [t-digest data structure](https://github.com/tdunning/t-digest) +to compute a quantile estimate on large data sources. +Output tables consist of a single row containing the calculated quantile. + +If calculating the `0.5` quantile or 50th percentile: + +{{< flex >}} +{{% flex-content %}} +**Given the following input table:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**`estimate_tdigest` returns:** + +| _value | +|:------:| +| 1.5 | +{{% /flex-content %}} +{{< /flex >}} + +### exact_mean +An aggregate method that takes the average of the two points closest to the quantile value. +Output tables consist of a single row containing the calculated quantile. + +If calculating the `0.5` quantile or 50th percentile: + +{{< flex >}} +{{% flex-content %}} +**Given the following input table:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**`exact_mean` returns:** + +| _value | +|:------:| +| 1.5 | +{{% /flex-content %}} +{{< /flex >}} + +### exact_selector +A selector method that returns the data point for which at least `q` points are less than. +Output tables consist of a single row containing the calculated quantile. + +If calculating the `0.5` quantile or 50th percentile: + +{{< flex >}} +{{% flex-content %}} +**Given the following input table:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:01:00Z | 1.0 | +| 2020-01-01T00:02:00Z | 1.0 | +| 2020-01-01T00:03:00Z | 2.0 | +| 2020-01-01T00:04:00Z | 3.0 | +{{% /flex-content %}} +{{% flex-content %}} +**`exact_selector` returns:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:02:00Z | 1.0 | +{{% /flex-content %}} +{{< /flex >}} + +{{% note %}} +The examples below use the [example data variable](/influxdb/v2.5/query-data/flux/#example-data-variable). +{{% /note %}} + +## Find the value representing the 99th percentile +Use the default method, `"estimate_tdigest"`, to return all rows in a table that +contain values in the 99th percentile of data in the table. 
+ +```js +data + |> quantile(q: 0.99) +``` + +## Find the average of values closest to the quantile +Use the `exact_mean` method to return a single row per input table containing the +average of the two values closest to the mathematical quantile of data in the table. +For example, to calculate the `0.99` quantile: + +```js +data + |> quantile(q: 0.99, method: "exact_mean") +``` + +## Find the point with the quantile value +Use the `exact_selector` method to return a single row per input table containing the +value that `q * 100`% of values in the table are less than. +For example, to calculate the `0.99` quantile: + +```js +data + |> quantile(q: 0.99, method: "exact_selector") +``` + +## Use quantile() with aggregateWindow() +[`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +segments data into windows of time, aggregates data in each window into a single +point, and then removes the time-based segmentation. +It is primarily used to [downsample data](/influxdb/v2.5/process-data/common-tasks/downsample-data/). + +To specify the [quantile calculation method](#select-a-method-for-calculating-the-quantile) in +`aggregateWindow()`, use the [full function syntax](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/#specify-parameters-of-the-aggregate-function): + +```js +data + |> aggregateWindow( + every: 5m, + fn: (tables=<-, column) => tables + |> quantile(q: 0.99, method: "exact_selector"), + ) +``` diff --git a/content/influxdb/v2.5/query-data/flux/query-fields.md b/content/influxdb/v2.5/query-data/flux/query-fields.md new file mode 100644 index 000000000..7d7b8de7b --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/query-fields.md @@ -0,0 +1,76 @@ +--- +title: Query fields and tags +seotitle: Query fields and tags in InfluxDB using Flux +description: > + Use `filter()` to query data based on fields, tags, or any other column value. + `filter()` performs operations similar to the `SELECT` statement and the `WHERE` + clause in InfluxQL and other SQL-like query languages. +weight: 201 +menu: + influxdb_2_5: + parent: Query with Flux +influxdb/v2.5/tags: [query, select, where] +related: + - /{{< latest "flux" >}}/stdlib/universe/filter/ + - /influxdb/v2.5/query-data/flux/conditional-logic/ + - /influxdb/v2.5/query-data/flux/regular-expressions/ +list_code_example: | + ```js + from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement" and r.tag == "example-tag") + |> filter(fn: (r) => r._field == "example-field") + ``` +--- + +Use [`filter()`](/{{< latest "flux" >}}/stdlib/universe/filter/) +to query data based on fields, tags, or any other column value. +`filter()` performs operations similar to the `SELECT` statement and the `WHERE` +clause in InfluxQL and other SQL-like query languages. + +## The filter() function +`filter()` has an `fn` parameter that expects a [predicate function](/influxdb/v2.5/reference/glossary/#predicate-function), +an anonymous function comprised of one or more [predicate expressions](/influxdb/v2.5/reference/glossary/#predicate-expression). +The predicate function evaluates each input row. +Rows that evaluate to `true` are **included** in the output data. +Rows that evaluate to `false` are **excluded** from the output data. + +```js +// ... + |> filter(fn: (r) => r._measurement == "example-measurement-name" ) +``` + +The `fn` predicate function requires an `r` argument, which represents each row +as `filter()` iterates over input data. 
+Key-value pairs in the row record represent columns and their values. +Use [dot notation or bracket notation](/{{< latest "flux" >}}/data-types/composite/record/#reference-values-in-a-record) +to reference specific column values in the predicate function. +Use [logical operators](/{{< latest "flux" >}}/spec/operators/#logical-operators) +to chain multiple predicate expressions together. + +```js +// Row record +r = {foo: "bar", baz: "quz"} + +// Example predicate function +(r) => r.foo == "bar" and r["baz"] == "quz" + +// Evaluation results +(r) => true and true +``` + +## Filter by fields and tags +The combination of [`from()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from), +[`range()`](/{{< latest "flux" >}}/stdlib/universe/range), +and `filter()` represent the most basic Flux query: + +1. Use `from()` to define your [bucket](/influxdb/v2.5/reference/glossary/#bucket). +2. Use `range()` to limit query results by time. +3. Use `filter()` to identify what rows of data to output. + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "example-measurement-name" and r.mytagname == "example-tag-value") + |> filter(fn: (r) => r._field == "example-field-name") +``` diff --git a/content/influxdb/v2.5/query-data/flux/rate.md b/content/influxdb/v2.5/query-data/flux/rate.md new file mode 100644 index 000000000..a99d404e0 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/rate.md @@ -0,0 +1,173 @@ +--- +title: Calculate the rate of change +seotitle: Calculate the rate of change in Flux +list_title: Rate +description: > + Use `derivative()` to calculate the rate of change between subsequent values or + `aggregate.rate()` to calculate the average rate of change per window of time. + If time between points varies, these functions normalize points to a common time interval + making values easily comparable. +weight: 210 +menu: + influxdb_2_5: + parent: Query with Flux + name: Rate +influxdb/v2.5/tags: [query, rate] +related: + - /{{< latest "flux" >}}/stdlib/universe/derivative/ + - /{{< latest "flux" >}}/stdlib/experimental/aggregate/rate/ +list_query_example: rate_of_change +--- + + +Use [`derivative()`](/{{< latest "flux" >}}/stdlib/universe/derivative/) +to calculate the rate of change between subsequent values or +[`aggregate.rate()`](/{{< latest "flux" >}}/stdlib/experimental/to/aggregate/rate/) +to calculate the average rate of change per window of time. +If time between points varies, these functions normalize points to a common time interval +making values easily comparable. + +- [Rate of change between subsequent values](#rate-of-change-between-subsequent-values) +- [Average rate of change per window of time](#average-rate-of-change-per-window-of-time) + +## Rate of change between subsequent values +Use the [`derivative()` function](/{{< latest "flux" >}}/stdlib/universe/derivative/) +to calculate the rate of change per unit of time between subsequent _non-null_ values. + +```js +data + |> derivative(unit: 1s) +``` + +By default, `derivative()` returns only positive derivative values and replaces negative values with _null_. +Calculated values are returned as [floats](/{{< latest "flux" >}}/spec/types/#numeric-types). 
+ +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:00Z | 250 | +| 2020-01-01T00:04:00Z | 160 | +| 2020-01-01T00:12:00Z | 150 | +| 2020-01-01T00:19:00Z | 220 | +| 2020-01-01T00:32:00Z | 200 | +| 2020-01-01T00:51:00Z | 290 | +| 2020-01-01T01:00:00Z | 340 | +{{% /flex-content %}} +{{% flex-content %}} +**`derivative(unit: 1m)` returns:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:04:00Z | | +| 2020-01-01T00:12:00Z | | +| 2020-01-01T00:19:00Z | 10.0 | +| 2020-01-01T00:32:00Z | | +| 2020-01-01T00:51:00Z | 4.74 | +| 2020-01-01T01:00:00Z | 5.56 | +{{% /flex-content %}} +{{< /flex >}} + +Results represent the rate of change **per minute** between subsequent values with +negative values set to _null_. + +### Return negative derivative values +To return negative derivative values, set the `nonNegative` parameter to `false`, + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:00Z | 250 | +| 2020-01-01T00:04:00Z | 160 | +| 2020-01-01T00:12:00Z | 150 | +| 2020-01-01T00:19:00Z | 220 | +| 2020-01-01T00:32:00Z | 200 | +| 2020-01-01T00:51:00Z | 290 | +| 2020-01-01T01:00:00Z | 340 | +{{% /flex-content %}} +{{% flex-content %}} +**The following returns:** + +```js +|> derivative(unit: 1m, nonNegative: false) +``` + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:04:00Z | -22.5 | +| 2020-01-01T00:12:00Z | -1.25 | +| 2020-01-01T00:19:00Z | 10.0 | +| 2020-01-01T00:32:00Z | -1.54 | +| 2020-01-01T00:51:00Z | 4.74 | +| 2020-01-01T01:00:00Z | 5.56 | +{{% /flex-content %}} +{{< /flex >}} + +Results represent the rate of change **per minute** between subsequent values and +include negative values. + +## Average rate of change per window of time + +Use the [`aggregate.rate()` function](/{{< latest "flux" >}}/stdlib/experimental/to/aggregate/rate/) +to calculate the average rate of change per window of time. + +```js +import "experimental/aggregate" + +data + |> aggregate.rate( + every: 1m, + unit: 1s, + groupColumns: ["tag1", "tag2"], + ) +``` + +`aggregate.rate()` returns the average rate of change (as a [float](/{{< latest "flux" >}}/spec/types/#numeric-types)) +per `unit` for time intervals defined by `every`. +Negative values are replaced with _null_. + +{{% note %}} +`aggregate.rate()` does not support `nonNegative: false`. +{{% /note %}} + +{{< flex >}} +{{% flex-content %}} +**Given the following input:** + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:00:00Z | 250 | +| 2020-01-01T00:04:00Z | 160 | +| 2020-01-01T00:12:00Z | 150 | +| 2020-01-01T00:19:00Z | 220 | +| 2020-01-01T00:32:00Z | 200 | +| 2020-01-01T00:51:00Z | 290 | +| 2020-01-01T01:00:00Z | 340 | +{{% /flex-content %}} +{{% flex-content %}} +**The following returns:** + +```js +|> aggregate.rate( + every: 20m, + unit: 1m, +) +``` + +| _time | _value | +|:----- | ------:| +| 2020-01-01T00:20:00Z | 10.00 | +| 2020-01-01T00:40:00Z | | +| 2020-01-01T01:00:00Z | 4.74 | +| 2020-01-01T01:20:00Z | 5.56 | +{{% /flex-content %}} +{{< /flex >}} + +Results represent the **average change rate per minute** of every **20 minute interval** +with negative values set to _null_. +Timestamps represent the right bound of the time window used to average values. 
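For reference, a sketch of a complete query that populates the input stream before calculating the windowed rate. The bucket, measurement, and field names below are placeholders:

```js
import "experimental/aggregate"

from(bucket: "example-bucket")
    |> range(start: -1h)
    |> filter(fn: (r) => r._measurement == "example-measurement" and r._field == "example-field")
    |> aggregate.rate(every: 20m, unit: 1m)
```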
diff --git a/content/influxdb/v2.5/query-data/flux/regular-expressions.md b/content/influxdb/v2.5/query-data/flux/regular-expressions.md new file mode 100644 index 000000000..462c08681 --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/regular-expressions.md @@ -0,0 +1,90 @@ +--- +title: Use regular expressions in Flux +list_title: Regular expressions +description: This guide walks through using regular expressions in evaluation logic in Flux functions. +influxdb/v2.5/tags: [regex] +menu: + influxdb_2_5: + name: Regular expressions + parent: Query with Flux +weight: 220 +aliases: + - /influxdb/v2.5/query-data/guides/regular-expressions/ +related: + - /influxdb/v2.5/query-data/flux/query-fields/ + - /{{< latest "flux" >}}/stdlib/regexp/ +list_query_example: regular_expressions +--- + +Regular expressions (regexes) are incredibly powerful when matching patterns in large collections of data. +With Flux, regular expressions are primarily used for evaluation logic in predicate functions for things +such as filtering rows, dropping and keeping columns, state detection, etc. +This guide shows how to use regular expressions in your Flux scripts. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +## Go regular expression syntax +Flux uses Go's [regexp package](https://golang.org/pkg/regexp/) for regular expression search. +The links [below](#helpful-links) provide information about Go's regular expression syntax. + +## Regular expression operators +Flux provides two comparison operators for use with regular expressions. + +#### `=~` +When the expression on the left **MATCHES** the regular expression on the right, this evaluates to `true`. + +#### `!~` +When the expression on the left **DOES NOT MATCH** the regular expression on the right, this evaluates to `true`. + +## Regular expressions in Flux +When using regex matching in your Flux scripts, enclose your regular expressions with `/`. +The following is the basic regex comparison syntax: + +###### Basic regex comparison syntax +```js +expression =~ /regex/ +expression !~ /regex/ +``` +## Examples + +### Use a regex to filter by tag value +The following example filters records by the `cpu` tag. +It only keeps records for which the `cpu` is either `cpu0`, `cpu1`, or `cpu2`. + +```js +from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => r._measurement == "cpu" and r.cpu =~ /cpu[0-2]$/) +``` + +### Use a regex to filter by field key +The following example excludes records that do not have `_percent` in a field key. + +```js +from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => r._measurement == "mem" and r._field =~ /_percent/) +``` + +### Drop columns matching a regex +The following example drops columns whose names do not being with `_`. 
+ +```js +from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => r._measurement == "mem") + |> drop(fn: (column) => column !~ /_.*/) +``` + +## Helpful links + +##### Syntax documentation +[regexp Syntax GoDoc](https://godoc.org/regexp/syntax) +[RE2 Syntax Overview](https://github.com/google/re2/wiki/Syntax) + +##### Go regex testers +[Regex Tester - Golang](https://regex-golang.appspot.com/assets/html/index.html) +[Regex101](https://regex101.com/) diff --git a/content/influxdb/v2.5/query-data/flux/scalar-values.md b/content/influxdb/v2.5/query-data/flux/scalar-values.md new file mode 100644 index 000000000..7a4ad031c --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/scalar-values.md @@ -0,0 +1,232 @@ +--- +title: Extract scalar values in Flux +list_title: Extract scalar values +description: > + Use Flux dynamic query functions to extract scalar values from Flux query output. + This lets you, for example, dynamically set variables using query results. +menu: + influxdb_2_5: + name: Extract scalar values + parent: Query with Flux +weight: 220 +influxdb/v2.5/tags: [scalar] +related: + - /{{< latest "flux" >}}/function-types/#dynamic-queries, Flux dynamic query functions +aliases: + - /influxdb/v2.5/query-data/guides/scalar-values/ +list_code_example: | + ```js + scalarValue = (tables=<-) => { + _record = tables + |> findRecord(fn: (key) => true, idx: 0) + + return _record._value + } + ``` +--- + +Use Flux [dynamic query functions](/{{< latest "flux" >}}/function-types/#dynamic-queries) +to extract scalar values from Flux query output. +This lets you, for example, dynamically set variables using query results. + +**To extract scalar values from output:** + +1. [Extract a column from the input stream](#extract-a-column) + _**or**_ [extract a row from the input stream](#extract-a-row). +2. Use the returned array or record to reference scalar values. + +_The samples on this page use the [sample data provided below](#sample-data)._ + +{{% warn %}} +#### Current limitations +- The InfluxDB user interface (UI) does not currently support raw scalar output. + Use [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) to add + scalar values to output data. +{{% /warn %}} + +## Table extraction +Flux formats query results as a stream of tables. +Both [`findColumn()`](/{{< latest "flux" >}}/stdlib/universe/findcolumn/) +and [`findRecord()`](/{{< latest "flux" >}}/stdlib/universe/findrecord/) +extract the first table in a stream of tables whose [group key](/{{< latest "flux" >}}/get-started/data-model/#group-key) +values match the `fn` [predicate function](/{{< latest "flux" >}}/get-started/syntax-basics/#predicate-functions). + +{{% note %}} +#### Extract the correct table +Flux functions do not guarantee table order. +`findColumn()` and `findRecord` extract only the **first** table that matches the `fn` predicate. +To extract the correct table, be very specific in your predicate function or +filter and transform your data to minimize the number of tables piped-forward into the functions. +{{% /note %}} + +## Extract a column +Use the [`findColumn()` function](/{{< latest "flux" >}}/stdlib/universe/findcolumn/) +to output an array of values from a specific column in the extracted table. 
+ +_See [Sample data](#sample-data) below._ + +```js +sampleData + |> findColumn( + fn: (key) => key._field == "temp" and key.location == "sfo", + column: "_value", + ) + +// Returns [65.1, 66.2, 66.3, 66.8] +``` + +### Use extracted column values +Use a variable to store the array of values. +In the example below, `SFOTemps` represents the array of values. +Reference a specific index (integer starting from `0`) in the array to return the +value at that index. + +_See [Sample data](#sample-data) below._ + +```js +SFOTemps = sampleData + |> findColumn( + fn: (key) => key._field == "temp" and key.location == "sfo", + column: "_value", + ) + +SFOTemps +// Returns [65.1, 66.2, 66.3, 66.8] + +SFOTemps[0] +// Returns 65.1 + +SFOTemps[2] +// Returns 66.3 +``` + +## Extract a row +Use the [`findRecord()` function](/{{< latest "flux" >}}/stdlib/universe/findrecord/) +to output data from a single row in the extracted table. +Specify the index of the row to output using the `idx` parameter. +The function outputs a record with key-value pairs for each column. + +```js +sampleData + |> findRecord( + fn: (key) => key._field == "temp" and key.location == "sfo", + idx: 0, + ) + +// Returns { +// _time:2019-11-11T12:00:00Z, +// _field:"temp", +// location:"sfo", +// _value: 65.1 +// } +``` + +### Use an extracted row record +Use a variable to store the extracted row record. +In the example below, `tempInfo` represents the extracted row. +Use [dot or bracket notation](/{{< latest "flux" >}}/data-types/composite/record/#dot-notation) +to reference keys in the record. + +```js +tempInfo = sampleData + |> findRecord( + fn: (key) => key._field == "temp" and key.location == "sfo", + idx: 0, + ) + +tempInfo +// Returns { +// _time:2019-11-11T12:00:00Z, +// _field:"temp", +// location:"sfo", +// _value: 65.1 +// } + +tempInfo._time +// Returns 2019-11-11T12:00:00Z + +tempInfo.location +// Returns sfo +``` + +## Example helper functions +Create custom helper functions to extract scalar values from query output. + +##### Extract a scalar field value +```js +// Define a helper function to extract field values +getFieldValue = (tables=<-, field) => { + extract = tables + |> findColumn(fn: (key) => key._field == field, column: "_value") + + return extract[0] +} + +// Use the helper function to define a variable +lastJFKTemp = sampleData + |> filter(fn: (r) => r.location == "kjfk") + |> last() + |> getFieldValue(field: "temp") + +lastJFKTemp +// Returns 71.2 +``` + +##### Extract scalar row data +```js +// Define a helper function to extract a row as a record +getRow = (tables=<-, field, idx=0) => { + extract = tables + |> findRecord(fn: (key) => true, idx: idx) + + return extract +} + +// Use the helper function to define a variable +lastReported = sampleData + |> last() + |> getRow(field: "temp") + +"The last location to report was ${lastReported.location}. +The temperature was ${string(v: lastReported._value)}°F." + +// Returns: +// The last location to report was kord. +// The temperature was 38.9°F. +``` + +--- + +## Sample data + +The following sample data set represents fictional temperature metrics collected +from three locations. +It's formatted in [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/) and imported +into the Flux query using the [`csv.from()` function](/{{< latest "flux" >}}/stdlib/csv/from/). 
+ +Place the following at the beginning of your query to use the sample data: + +{{% truncate %}} +```js +import "csv" + +sampleData = csv.from(csv: " +#datatype,string,long,dateTime:RFC3339,string,string,double +#group,false,true,false,true,true,false +#default,,,,,, +,result,table,_time,location,_field,_value +,,0,2019-11-01T12:00:00Z,sfo,temp,65.1 +,,0,2019-11-01T13:00:00Z,sfo,temp,66.2 +,,0,2019-11-01T14:00:00Z,sfo,temp,66.3 +,,0,2019-11-01T15:00:00Z,sfo,temp,66.8 +,,1,2019-11-01T12:00:00Z,kjfk,temp,69.4 +,,1,2019-11-01T13:00:00Z,kjfk,temp,69.9 +,,1,2019-11-01T14:00:00Z,kjfk,temp,71.0 +,,1,2019-11-01T15:00:00Z,kjfk,temp,71.2 +,,2,2019-11-01T12:00:00Z,kord,temp,46.4 +,,2,2019-11-01T13:00:00Z,kord,temp,46.3 +,,2,2019-11-01T14:00:00Z,kord,temp,42.7 +,,2,2019-11-01T15:00:00Z,kord,temp,38.9 +") +``` +{{% /truncate %}} diff --git a/content/influxdb/v2.5/query-data/flux/sort-limit.md b/content/influxdb/v2.5/query-data/flux/sort-limit.md new file mode 100644 index 000000000..51059b42a --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/sort-limit.md @@ -0,0 +1,68 @@ +--- +title: Sort and limit data with Flux +seotitle: Sort and limit data in InfluxDB with Flux +list_title: Sort and limit +description: > + Use `sort()` to order records within each table by specific columns and + `limit()` to limit the number of records in output tables to a fixed number, `n`. +influxdb/v2.5/tags: [sort, limit] +menu: + influxdb_2_5: + name: Sort and limit + parent: Query with Flux +weight: 203 +aliases: + - /influxdb/v2.5/query-data/guides/sort-limit/ +related: + - /{{< latest "flux" >}}/stdlib/universe/sort + - /{{< latest "flux" >}}/stdlib/universe/limit +list_query_example: sort_limit +--- + +Use [`sort()`](/{{< latest "flux" >}}/stdlib/universe/sort) +to order records within each table by specific columns and +[`limit()`](/{{< latest "flux" >}}/stdlib/universe/limit) +to limit the number of records in output tables to a fixed number, `n`. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +##### Example sorting system uptime + +The following example orders system uptime first by region, then host, then value. + +```js +from(bucket: "example-bucket") + |> range(start: -12h) + |> filter(fn: (r) => r._measurement == "system" and r._field == "uptime") + |> sort(columns: ["region", "host", "_value"]) +``` + +The [`limit()` function](/{{< latest "flux" >}}/stdlib/universe/limit) +limits the number of records in output tables to a fixed number, `n`. +The following example shows up to 10 records from the past hour. + +```js +from(bucket:"example-bucket") + |> range(start:-1h) + |> limit(n:10) +``` + +You can use `sort()` and `limit()` together to show the top N records. +The example below returns the 10 top system uptime values sorted first by +region, then host, then value. + +```js +from(bucket: "example-bucket") + |> range(start: -12h) + |> filter(fn: (r) => r._measurement == "system" and r._field == "uptime") + |> sort(columns: ["region", "host", "_value"]) + |> limit(n: 10) +``` + +You now have created a Flux query that sorts and limits data. 
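+
+By default, `sort()` orders records in ascending order.
+To return the largest values first, a small variation on the query above
+(using the same hypothetical `example-bucket` data) is to pass `desc: true` to `sort()`:
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -12h)
+    |> filter(fn: (r) => r._measurement == "system" and r._field == "uptime")
+    // Sort by value in descending order, then keep the first 10 records
+    |> sort(columns: ["_value"], desc: true)
+    |> limit(n: 10)
+```
+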
+Flux also provides the [`top()`](/{{< latest "flux" >}}/stdlib/universe/top) +and [`bottom()`](/{{< latest "flux" >}}/stdlib/universe/bottom) +functions to perform both of these functions at the same time. diff --git a/content/influxdb/v2.5/query-data/flux/sql.md b/content/influxdb/v2.5/query-data/flux/sql.md new file mode 100644 index 000000000..b888bf33c --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/sql.md @@ -0,0 +1,378 @@ +--- +title: Query SQL data sources +seotitle: Query SQL data sources with InfluxDB +list_title: Query SQL data +description: > + The Flux `sql` package provides functions for working with SQL data sources. + Use `sql.from()` to query SQL databases like PostgreSQL, MySQL, Snowflake, + SQLite, Microsoft SQL Server, Amazon Athena, and Google BigQuery. +influxdb/v2.5/tags: [query, flux, sql] +menu: + influxdb_2_5: + parent: Query with Flux + list_title: SQL data +weight: 220 +aliases: + - /influxdb/v2.5/query-data/guides/sql/ +related: + - /{{< latest "flux" >}}/stdlib/sql/ +list_code_example: | + ```js + import "sql" + + sql.from( + driverName: "postgres", + dataSourceName: "postgresql://user:password@localhost", + query: "SELECT * FROM example_table", + ) + ``` +--- + +The [Flux](/influxdb/v2.5/reference/flux) `sql` package provides functions for working with SQL data sources. +[`sql.from()`](/{{< latest "flux" >}}/stdlib/sql/from/) lets you query SQL data sources +like [PostgreSQL](https://www.postgresql.org/), [MySQL](https://www.mysql.com/), +[Snowflake](https://www.snowflake.com/), [SQLite](https://www.sqlite.org/index.html), +[Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server/default.aspx), +[Amazon Athena](https://aws.amazon.com/athena/) and [Google BigQuery](https://cloud.google.com/bigquery) +and use the results with InfluxDB dashboards, tasks, and other operations. + +- [Query a SQL data source](#query-a-sql-data-source) +- [Join SQL data with data in InfluxDB](#join-sql-data-with-data-in-influxdb) +- [Use SQL results to populate dashboard variables](#use-sql-results-to-populate-dashboard-variables) +- [Use secrets to store SQL database credentials](#use-secrets-to-store-sql-database-credentials) +- [Sample sensor data](#sample-sensor-data) + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +## Query a SQL data source +To query a SQL data source: + +1. Import the `sql` package in your Flux query +2. 
Use the `sql.from()` function to specify the driver, data source name (DSN), + and query used to query data from your SQL data source: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[PostgreSQL](#) +[MySQL](#) +[Snowflake](#) +[SQLite](#) +[SQL Server](#) +[Athena](#) +[BigQuery](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```js +import "sql" + +sql.from( + driverName: "postgres", + dataSourceName: "postgresql://user:password@localhost", + query: "SELECT * FROM example_table", +) +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +import "sql" + +sql.from( + driverName: "mysql", + dataSourceName: "user:password@tcp(localhost:3306)/db", + query: "SELECT * FROM example_table", +) +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +import "sql" + +sql.from( + driverName: "snowflake", + dataSourceName: "user:password@account/db/exampleschema?warehouse=wh", + query: "SELECT * FROM example_table", +) +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +// NOTE: InfluxDB OSS and InfluxDB Cloud do not have access to +// the local filesystem and cannot query SQLite data sources. +// Use the Flux REPL to query an SQLite data source. + +import "sql" + +sql.from( + driverName: "sqlite3", + dataSourceName: "file:/path/to/test.db?cache=shared&mode=ro", + query: "SELECT * FROM example_table", +) +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +import "sql" + +sql.from( + driverName: "sqlserver", + dataSourceName: "sqlserver://user:password@localhost:1234?database=examplebdb", + query: "GO SELECT * FROM Example.Table", +) +``` + +_For information about authenticating with SQL Server using ADO-style parameters, +see [SQL Server ADO authentication](/{{< latest "flux" >}}/stdlib/sql/from/#sql-server-ado-authentication)._ +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +import "sql" +sql.from( + driverName: "awsathena", + dataSourceName: "s3://myorgqueryresults/?accessID=12ab34cd56ef®ion=region-name&secretAccessKey=y0urSup3rs3crEtT0k3n", + query: "GO SELECT * FROM Example.Table", +) +``` + +_For information about parameters to include in the Athena DSN, +see [Athena connection string](/{{< latest "flux" >}}/stdlib/sql/from/#athena-connection-string)._ +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +import "sql" +sql.from( + driverName: "bigquery", + dataSourceName: "bigquery://projectid/?apiKey=mySuP3r5ecR3tAP1K3y", + query: "SELECT * FROM exampleTable", +) +``` + +_For information about authenticating with BigQuery, see +[BigQuery authentication parameters](/{{< latest "flux" >}}/stdlib/sql/from/#bigquery-authentication-parameters)._ +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +_See the [`sql.from()` documentation](/{{< latest "flux" >}}/stdlib/sql/from/) for +information about required function parameters._ + +## Join SQL data with data in InfluxDB +One of the primary benefits of querying SQL data sources from InfluxDB +is the ability to enrich query results with data stored outside of InfluxDB. + +Using the [air sensor sample data](#sample-sensor-data) below, the following query +joins air sensor metrics stored in InfluxDB with sensor information stored in PostgreSQL. +The joined data lets you query and filter results based on sensor information +that isn't stored in InfluxDB. 
+ +```js +// Import the "sql" package +import "sql" + +// Query data from PostgreSQL +sensorInfo = sql.from( + driverName: "postgres", + dataSourceName: "postgresql://localhost?sslmode=disable", + query: "SELECT * FROM sensors", +) + +// Query data from InfluxDB +sensorMetrics = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "airSensors") + +// Join InfluxDB query results with PostgreSQL query results +join(tables: {metric: sensorMetrics, info: sensorInfo}, on: ["sensor_id"]) +``` + +## Use SQL results to populate dashboard variables +Use `sql.from()` to [create dashboard variables](/influxdb/v2.5/visualize-data/variables/create-variable/) +from SQL query results. +The following example uses the [air sensor sample data](#sample-sensor-data) below to +create a variable that lets you select the location of a sensor. + +```js +import "sql" + +sql.from( + driverName: "postgres", + dataSourceName: "postgresql://localhost?sslmode=disable", + query: "SELECT * FROM sensors", +) + |> rename(columns: {location: "_value"}) + |> keep(columns: ["_value"]) +``` + +Use the variable to manipulate queries in your dashboards. + +{{< img-hd src="/img/influxdb/2-0-sql-dashboard-variable.png" alt="Dashboard variable from SQL query results" />}} + +--- + +## Use secrets to store SQL database credentials +If your SQL database requires authentication, use [InfluxDB secrets](/influxdb/v2.5/security/secrets/) +to store and populate connection credentials. +By default, InfluxDB base64-encodes and stores secrets in its internal key-value store, BoltDB. +For added security, [store secrets in Vault](/influxdb/v2.5/security/secrets/use-vault/). + +### Store your database credentials as secrets +Use the [InfluxDB API](/influxdb/v2.5/reference/api/) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/secret/) +to store your database credentials as secrets. + +{{< tabs-wrapper >}} +{{% tabs %}} +[InfluxDB API](#) +[influx CLI](#) +{{% /tabs %}} +{{% tab-content %}} +```sh +curl --request PATCH http://localhost:8086/api/v2/orgs//secrets \ + --header 'Authorization: Token YOURAUTHTOKEN' \ + --header 'Content-type: application/json' \ + --data '{ + "POSTGRES_HOST": "http://example.com", + "POSTGRES_USER": "example-username", + "POSTGRES_PASS": "example-password" +}' +``` + +**To store secrets, you need:** + +- [your organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) +- [your API token](/influxdb/v2.5/security/tokens/view-tokens/) +{{% /tab-content %}} +{{% tab-content %}} +```sh +# Syntax +influx secret update -k + +# Example +influx secret update -k POSTGRES_PASS +``` + +**When prompted, enter your secret value.** + +{{% warn %}} +You can provide the secret value with the `-v`, `--value` flag, but the **plain text +secret may appear in your shell history**. + +```sh +influx secret update -k -v +``` +{{% /warn %}} +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +### Use secrets in your query +Import the `influxdata/influxdb/secrets` package and use [string interpolation](/{{< latest "flux" >}}/spec/string-interpolation/) +to populate connection credentials with stored secrets in your Flux query. 
+ +```js +import "sql" +import "influxdata/influxdb/secrets" + +POSTGRES_HOST = secrets.get(key: "POSTGRES_HOST") +POSTGRES_USER = secrets.get(key: "POSTGRES_USER") +POSTGRES_PASS = secrets.get(key: "POSTGRES_PASS") + +sql.from( + driverName: "postgres", + dataSourceName: "postgresql://${POSTGRES_USER}:${POSTGRES_PASS}@${POSTGRES_HOST}", + query: "SELECT * FROM sensors", +) +``` + +--- + +## Sample sensor data +The [air sensor sample data](#download-sample-air-sensor-data) and +[sample sensor information](#import-the-sample-sensor-information) simulate a +group of sensors that measure temperature, humidity, and carbon monoxide +in rooms throughout a building. +Each collected data point is stored in InfluxDB with a `sensor_id` tag that identifies +the specific sensor it came from. +Sample sensor information is stored in PostgreSQL. + +**Sample data includes:** + +- Simulated data collected from each sensor and stored in the `airSensors` measurement in **InfluxDB**: + - temperature + - humidity + - co + +- Information about each sensor stored in the `sensors` table in **PostgreSQL**: + - sensor_id + - location + - model_number + - last_inspected + +#### Download sample air sensor data + +1. [Create a bucket](/influxdb/v2.5/organizations/buckets/create-bucket/) to store the data. +2. [Create an InfluxDB task](/influxdb/v2.5/process-data/manage-tasks/create-task/) + and use the [`sample.data()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/data/) + to download sample air sensor data every 15 minutes. + Write the downloaded sample data to your new bucket: + + ```js + import "influxdata/influxdb/sample" + + option task = {name: "Collect sample air sensor data", every: 15m} + + sample.data(set: "airSensor") + |> to(org: "example-org", bucket: "example-bucket") + ``` + +3. [Query your target bucket](/influxdb/v2.5/query-data/execute-queries/) after + the first task run to ensure the sample data is writing successfully. + + ```js + from(bucket: "example-bucket") + |> range(start: -1m) + |> filter(fn: (r) => r._measurement == "airSensors") + ``` + +#### Import the sample sensor information +1. [Download and install PostgreSQL](https://www.postgresql.org/download/). +2. Download the sample sensor information CSV. + + Download sample sensor information + +3. Use a PostgreSQL client (`psql` or a GUI) to create the `sensors` table: + + ``` + CREATE TABLE sensors ( + sensor_id character varying(50), + location character varying(50), + model_number character varying(50), + last_inspected date + ); + ``` + +4. Import the downloaded CSV sample data. + _Update the `FROM` file path to the path of the downloaded CSV sample data._ + + ``` + COPY sensors(sensor_id,location,model_number,last_inspected) + FROM '/path/to/sample-sensor-info.csv' DELIMITER ',' CSV HEADER; + ``` + +5. 
Query the table to ensure the data was imported correctly: + + ``` + SELECT * FROM sensors; + ``` + +#### Import the sample data dashboard +Download and import the Air Sensors dashboard to visualize the generated data: + +View Air Sensors dashboard JSON + +_For information about importing a dashboard, see [Create a dashboard](/influxdb/v2.5/visualize-data/dashboards/create-dashboard)._ diff --git a/content/influxdb/v2.5/query-data/flux/window-aggregate.md b/content/influxdb/v2.5/query-data/flux/window-aggregate.md new file mode 100644 index 000000000..d815c9f6c --- /dev/null +++ b/content/influxdb/v2.5/query-data/flux/window-aggregate.md @@ -0,0 +1,358 @@ +--- +title: Window and aggregate data with Flux +seotitle: Window and aggregate data in InfluxDB with Flux +list_title: Window & aggregate +description: > + This guide walks through windowing and aggregating data with Flux and outlines + how it shapes your data in the process. +menu: + influxdb_2_5: + name: Window & aggregate + parent: Query with Flux +weight: 204 +influxdb/v2.5/tags: [flux, aggregates] +aliases: + - /influxdb/v2.5/query-data/guides/window-aggregate/ + - /influxdb/v2.5/query-data/flux/windowing-aggregating/ +related: + - /{{< latest "flux" >}}/stdlib/universe/aggregatewindow + - /{{< latest "flux" >}}/stdlib/universe/window + - /{{< latest "flux" >}}/function-types/#aggregates, Flux aggregate functions + - /{{< latest "flux" >}}/function-types/#selectors, Flux selector functions +list_query_example: aggregate_window +--- + +A common operation performed with time series data is grouping data into windows of time, +or "windowing" data, then aggregating windowed values into a new value. +This guide walks through windowing and aggregating data with Flux and demonstrates +how data is shaped in the process. + +If you're just getting started with Flux queries, check out the following: + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) for a conceptual overview of Flux and parts of a Flux query. +- [Execute queries](/influxdb/v2.5/query-data/execute-queries/) to discover a variety of ways to run your queries. + +{{% note %}} +The following example is an in-depth walk-through of the steps required to window and aggregate data. +The [`aggregateWindow()` function](#summing-up) performs these operations for you, but understanding +how data is shaped in the process helps to successfully create your desired output. +{{% /note %}} + +## Data set +For the purposes of this guide, define a variable that represents your base data set. +The following example queries the memory usage of the host machine. + +```js +dataSet = from(bucket: "example-bucket") + |> range(start: -5m) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> drop(columns: ["host"]) +``` + +{{% note %}} +This example drops the `host` column from the returned data since the memory data +is only tracked for a single host and it simplifies the output tables. +Dropping the `host` column is optional and not recommended if monitoring memory +on multiple hosts. 
+{{% /note %}} + +`dataSet` can now be used to represent your base data, which will look similar to the following: + +{{% truncate %}} +``` +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:00.000000000Z 71.11611366271973 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:10.000000000Z 67.39630699157715 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:20.000000000Z 64.16666507720947 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:30.000000000Z 64.19951915740967 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:40.000000000Z 64.2122745513916 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:50.000000000Z 64.22209739685059 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 64.6336555480957 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:10.000000000Z 64.16516304016113 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:20.000000000Z 64.18349742889404 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:30.000000000Z 64.20474052429199 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:40.000000000Z 68.65062713623047 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:50.000000000Z 67.20139980316162 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 70.9143877029419 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:10.000000000Z 64.14549350738525 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:20.000000000Z 64.15379047393799 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:30.000000000Z 64.1592264175415 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:40.000000000Z 64.18190002441406 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:50.000000000Z 64.28837776184082 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 64.29731845855713 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:10.000000000Z 64.36963081359863 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:20.000000000Z 64.37397003173828 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:30.000000000Z 64.44413661956787 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:40.000000000Z 64.42906856536865 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:50.000000000Z 64.44573402404785 
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.48912620544434 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:10.000000000Z 64.49522972106934 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:20.000000000Z 64.48652744293213 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:30.000000000Z 64.49949741363525 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:40.000000000Z 64.4949197769165 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:50.000000000Z 64.49787616729736 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229 +``` +{{% /truncate %}} + +## Windowing data +Use the [`window()` function](/{{< latest "flux" >}}/stdlib/universe/window) +to group your data based on time bounds. +The most common parameter passed with the `window()` is `every` which +defines the duration of time between windows. +Other parameters are available, but for this example, window the base data +set into one minute windows. + +```js +dataSet + |> window(every: 1m) +``` + +{{% note %}} +The `every` parameter supports all [valid duration units](/{{< latest "flux" >}}/spec/types/#duration-types), +including **calendar months (`1mo`)** and **years (`1y`)**. +{{% /note %}} + +Each window of time is output in its own table containing all records that fall within the window. + +{{% truncate %}} +###### window() output tables +``` +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:00.000000000Z 71.11611366271973 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:10.000000000Z 67.39630699157715 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:20.000000000Z 64.16666507720947 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:30.000000000Z 64.19951915740967 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:40.000000000Z 64.2122745513916 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:50.000000000Z 64.22209739685059 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 64.6336555480957 +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:10.000000000Z 64.16516304016113 +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:20.000000000Z 64.18349742889404 +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 
2018-11-03T17:51:30.000000000Z 64.20474052429199 +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:40.000000000Z 68.65062713623047 +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:50.000000000Z 67.20139980316162 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 70.9143877029419 +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:10.000000000Z 64.14549350738525 +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:20.000000000Z 64.15379047393799 +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:30.000000000Z 64.1592264175415 +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:40.000000000Z 64.18190002441406 +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:50.000000000Z 64.28837776184082 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 64.29731845855713 +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:10.000000000Z 64.36963081359863 +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:20.000000000Z 64.37397003173828 +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:30.000000000Z 64.44413661956787 +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:40.000000000Z 64.42906856536865 +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:50.000000000Z 64.44573402404785 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.48912620544434 +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:10.000000000Z 64.49522972106934 +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:20.000000000Z 64.48652744293213 +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:30.000000000Z 64.49949741363525 +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:40.000000000Z 64.4949197769165 +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:50.000000000Z 64.49787616729736 + + 
+Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:55:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229 +``` +{{% /truncate %}} + +When visualized in the InfluxDB UI, each window table is displayed in a different color. + +![Windowed data](/img/flux/simple-windowed-data.png) + +## Aggregate data +[Aggregate functions](/{{< latest "flux" >}}/function-types#aggregates) take the values +of all rows in a table and use them to perform an aggregate operation. +The result is output as a new value in a single-row table. + +Since windowed data is split into separate tables, aggregate operations run against +each table separately and output new tables containing only the aggregated value. + +For this example, use the [`mean()` function](/{{< latest "flux" >}}/stdlib/universe/mean) +to output the average of each window: + +```js +dataSet + |> window(every: 1m) + |> mean() +``` + +{{% truncate %}} +###### mean() output tables +``` +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------------- +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 65.88549613952637 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------------- +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 65.50651391347249 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------------- +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 65.30719598134358 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------------- +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 64.39330975214641 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------------- +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 64.49386278788249 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------------- +2018-11-03T17:55:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 64.49816226959229 +``` +{{% /truncate %}} + +Because each data point is contained in its own table, when visualized, +they appear 
as single, unconnected points. + +![Aggregated windowed data](/img/flux/simple-windowed-aggregate-data.png) + +### Recreate the time column +**Notice the `_time` column is not in the [aggregated output tables](#mean-output-tables).** +Because records in each table are aggregated together, their timestamps no longer +apply and the column is removed from the group key and table. + +Also notice the `_start` and `_stop` columns still exist. +These represent the lower and upper bounds of the time window. + +Many Flux functions rely on the `_time` column. +To further process your data after an aggregate function, you need to re-add `_time`. +Use the [`duplicate()` function](/{{< latest "flux" >}}/stdlib/universe/duplicate) to +duplicate either the `_start` or `_stop` column as a new `_time` column. + +```js +dataSet + |> window(every: 1m) + |> mean() + |> duplicate(column: "_stop", as: "_time") +``` + +{{% truncate %}} +###### duplicate() output tables +``` +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 65.88549613952637 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 65.50651391347249 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 65.30719598134358 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.39330975214641 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49386278788249 + + +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:55:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 
2018-11-03T17:55:00.000000000Z 64.49816226959229 +``` +{{% /truncate %}} + +## "Unwindow" aggregate tables +Keeping aggregate values in separate tables generally isn't the format in which you want your data. +Use the `window()` function to "unwindow" your data into a single infinite (`inf`) window. + +```js +dataSet + |> window(every: 1m) + |> mean() + |> duplicate(column: "_stop", as: "_time") + |> window(every: inf) +``` + +{{% note %}} +Windowing requires a `_time` column which is why it's necessary to +[recreate the `_time` column](#recreate-the-time-column) after an aggregation. +{{% /note %}} + +###### Unwindowed output table +``` +Table: keys: [_start, _stop, _field, _measurement] + _start:time _stop:time _field:string _measurement:string _time:time _value:float +------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 65.88549613952637 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 65.50651391347249 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 65.30719598134358 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.39330975214641 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49386278788249 +2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229 +``` + +With the aggregate values in a single table, data points in the visualization are connected. + +![Unwindowed aggregate data](/img/flux/simple-unwindowed-data.png) + +## Summing up +You have now created a Flux query that windows and aggregates data. +The data transformation process outlined in this guide should be used for all aggregation operations. + +Flux also provides the [`aggregateWindow()` function](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow) +which performs all these separate functions for you. + +The following Flux query will return the same results: + +###### aggregateWindow function +```js +dataSet + |> aggregateWindow(every: 1m, fn: mean) +``` diff --git a/content/influxdb/v2.5/query-data/get-started/_index.md b/content/influxdb/v2.5/query-data/get-started/_index.md new file mode 100644 index 000000000..1d872140d --- /dev/null +++ b/content/influxdb/v2.5/query-data/get-started/_index.md @@ -0,0 +1,35 @@ +--- +title: Get started with Flux and InfluxDB +description: > + Get started with Flux, the functional data scripting language, and learn the + basics of writing a Flux query that queries InfluxDB. +aliases: + - /influxdb/v2.5/query-data/get-started/getting-started +weight: 101 +influxdb/v2.5/tags: [query, flux, get-started] +menu: + influxdb_2_5: + name: Get started with Flux + parent: Query data +related: + - /{{< latest "flux" >}}/get-started/ + - /{{< latest "flux" >}}/ + - /{{< latest "flux" >}}/stdlib/ +--- + +Flux is InfluxData's functional data scripting language designed for querying, +analyzing, and acting on data. + +These guides walks through important concepts related to Flux and querying time +series data from InfluxDB using Flux. 
+ +## Tools for working with Flux +The [Execute queries](/influxdb/v2.5/query-data/execute-queries) guide walks through +the different tools available for querying InfluxDB with Flux. + +## Before you start +To get a basic understanding of the Flux data model and syntax, see +[Get started with Flux](/{{< latest "flux" >}}/get-started/) in the +[Flux documentation](/{{< latest "flux" >}}/). + +{{< page-nav next="/influxdb/v2.5/query-data/get-started/query-influxdb/" >}} diff --git a/content/influxdb/v2.5/query-data/get-started/query-influxdb.md b/content/influxdb/v2.5/query-data/get-started/query-influxdb.md new file mode 100644 index 000000000..c9105667c --- /dev/null +++ b/content/influxdb/v2.5/query-data/get-started/query-influxdb.md @@ -0,0 +1,133 @@ +--- +title: Query InfluxDB with Flux +description: Learn the basics of using Flux to query data from InfluxDB. +influxdb/v2.5/tags: [query, flux] +menu: + influxdb_2_5: + name: Query InfluxDB + parent: Get started with Flux +weight: 201 +related: + - /{{< latest "flux" >}}/get-started/query-basics/ + - /influxdb/v2.5/query-data/flux/ + - /{{< latest "flux" >}}/stdlib/influxdata/influxdb/from + - /{{< latest "flux" >}}/stdlib/universe/range + - /{{< latest "flux" >}}/stdlib/universe/filter +--- + +This guide walks through the basics of using Flux to query data from InfluxDB. +Every Flux query needs the following: + +1. [A data source](#1-define-your-data-source) +2. [A time range](#2-specify-a-time-range) +3. [Data filters](#3-filter-your-data) + + +## 1. Define your data source +Flux's [`from()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from/) function defines an InfluxDB data source. +It requires a [`bucket`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from/#bucket) parameter. +The following examples use `example-bucket` as the bucket name. + +```js +from(bucket:"example-bucket") +``` + +## 2. Specify a time range +Flux requires a time range when querying time series data. +"Unbounded" queries are very resource-intensive and as a protective measure, +Flux will not query the database without a specified range. + +Use the [pipe-forward operator](/{{< latest "flux" >}}/get-started/syntax-basics/#pipe-forward-operator) +(`|>`) to pipe data from your data source into +[`range()`](/{{< latest "flux" >}}/stdlib/universe/range), which specifies a time range for your query. +It accepts two parameters: `start` and `stop`. +Start and stop values can be **relative** using negative [durations](/{{< latest "flux" >}}/data-types/basic/duration/) +or **absolute** using [timestamps](/{{< latest "flux" >}}/data-types/basic/time/). + +###### Example relative time ranges +```js +// Relative time range with start only. Stop defaults to now. +from(bucket:"example-bucket") + |> range(start: -1h) + +// Relative time range with start and stop +from(bucket:"example-bucket") + |> range(start: -1h, stop: -10m) +``` + +{{% note %}} +Relative ranges are relative to "now." +{{% /note %}} + +###### Example absolute time range +```js +from(bucket:"example-bucket") + |> range(start: 2021-01-01T00:00:00Z, stop: 2021-01-01T12:00:00Z) +``` + +#### Use the following: +For this guide, use the relative time range, `-15m`, to limit query results to data from the last 15 minutes: + +```js +from(bucket:"example-bucket") + |> range(start: -15m) +``` + +## 3. Filter your data +Pass your ranged data into `filter()` to narrow results based on data attributes or columns. 
+
+`filter()` has one parameter, `fn`, which expects a
+[predicate function](/{{< latest "flux" >}}/get-started/syntax-basics/#predicate-functions) that
+evaluates rows by column values.
+
+`filter()` iterates over every input row and structures row data as a Flux
+[record](/{{< latest "flux" >}}/data-types/composite/record/).
+The record is passed into the predicate function as `r` where it is evaluated using
+[predicate expressions](/{{< latest "flux" >}}/get-started/syntax-basics/#predicate-expressions).
+
+Rows that evaluate to `false` are dropped from the output data.
+Rows that evaluate to `true` persist in the output data.
+
+```js
+// Pattern
+(r) => (r.recordProperty comparisonOperator comparisonExpression)
+
+// Example with single filter
+(r) => (r._measurement == "cpu")
+
+// Example with multiple filters
+(r) => (r._measurement == "cpu" and r._field != "usage_system")
+```
+
+#### Use the following:
+For this example, filter by the `cpu` measurement, `usage_system` field, and
+`cpu-total` tag value:
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -15m)
+    |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total")
+```
+
+## 4. Yield your queried data
+[`yield()`](/{{< latest "flux" >}}/stdlib/universe/yield/) outputs the result of the query.
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -15m)
+    |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total")
+    |> yield()
+```
+
+Flux automatically assumes a `yield()` function at
+the end of each script to output and visualize the data.
+Explicitly calling `yield()` is only necessary when including multiple queries
+in the same Flux query.
+Each set of returned data needs to be named using the `yield()` function.
+
+## Congratulations!
+You have now queried data from InfluxDB using Flux.
+
+The query shown here is a basic example.
+Flux queries can be extended in many ways to form powerful scripts.
+
+{{< page-nav prev="/influxdb/v2.5/query-data/get-started/" next="/influxdb/v2.5/query-data/get-started/transform-data/" >}}
diff --git a/content/influxdb/v2.5/query-data/get-started/transform-data.md b/content/influxdb/v2.5/query-data/get-started/transform-data.md
new file mode 100644
index 000000000..be2fcefdb
--- /dev/null
+++ b/content/influxdb/v2.5/query-data/get-started/transform-data.md
@@ -0,0 +1,162 @@
+---
+title: Transform data with Flux
+description: Learn the basics of using Flux to transform data queried from InfluxDB.
+influxdb/v2.5/tags: [flux, transform, query]
+menu:
+  influxdb_2_5:
+    name: Transform data
+    parent: Get started with Flux
+weight: 202
+related:
+  - /{{< latest "flux" >}}/stdlib/universe/aggregatewindow
+  - /{{< latest "flux" >}}/stdlib/universe/window
+---
+
+When [querying data from InfluxDB](/influxdb/v2.5/query-data/get-started/query-influxdb),
+you often need to transform that data in some way.
+Common examples are aggregating data, downsampling data, etc.
+
+This guide demonstrates using [Flux functions](/{{< latest "flux" >}}/stdlib/) to transform your data.
+It walks through creating a Flux script that partitions data into windows of time,
+averages the `_value`s in each window, and outputs the averages as a new table.
+
+{{% note %}}
+If you're not familiar with how Flux structures and operates on data, see
+[Flux data model](/{{< latest "flux" >}}/get-started/data-model/).
+{{% /note %}} + +## Query data +Use the query built in the previous [Query data from InfluxDB](/influxdb/v2.5/query-data/get-started/query-influxdb) +guide, but update the range to pull data from the last hour: + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") +``` + +## Flux functions +Flux provides a number of functions that perform specific operations, transformations, and tasks. +You can also [create custom functions](/influxdb/v2.5/query-data/flux/custom-functions) in your Flux queries. +_Functions are covered in detail in the [Flux standard library](/{{< latest "flux" >}}/stdlib/) documentation._ + +A common type of function used when transforming data queried from InfluxDB is an aggregate function. +Aggregate functions take a set of `_value`s in a table, aggregate them, and transform +them into a new value. + +This example uses the [`mean()` function](/{{< latest "flux" >}}/stdlib/universe/mean) +to average values within each time window. + +{{% note %}} +The following example walks through the steps required to window and aggregate data, +but there is a [`aggregateWindow()` helper function](#helper-functions) that does it for you. +It's just good to understand the steps in the process. +{{% /note %}} + +## Window your data +Flux's [`window()` function](/{{< latest "flux" >}}/stdlib/universe/window) partitions records based on a time value. +Use the `every` parameter to define a duration of each window. + +{{% note %}} +#### Calendar months and years +`every` supports all [valid duration units](/{{< latest "flux" >}}/spec/types/#duration-types), +including **calendar months (`1mo`)** and **years (`1y`)**. +{{% /note %}} + +For this example, window data in five minute intervals (`5m`). + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + |> window(every: 5m) +``` + +As data is gathered into windows of time, each window is output as its own table. +When visualized, each table is assigned a unique color. + +![Windowed data tables](/img/flux/windowed-data.png) + +## Aggregate windowed data +Flux aggregate functions take the `_value`s in each table and aggregate them in some way. +Use the [`mean()` function](/{{< latest "flux" >}}/stdlib/universe/mean) to average the `_value`s of each table. + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + |> window(every: 5m) + |> mean() +``` + +As rows in each window are aggregated, their output table contains only a single row with the aggregate value. +Windowed tables are all still separate and, when visualized, will appear as single, unconnected points. + +![Windowed aggregate data](/img/flux/windowed-aggregates.png) + +## Add times to your aggregates +As values are aggregated, the resulting tables do not have a `_time` column because +the records used for the aggregation all have different timestamps. +Aggregate functions don't infer what time should be used for the aggregate value. +Therefore the `_time` column is dropped. + +A `_time` column is required in the [next operation](#unwindow-aggregate-tables). +To add one, use the [`duplicate()` function](/{{< latest "flux" >}}/stdlib/universe/duplicate) +to duplicate the `_stop` column as the `_time` column for each windowed table. 
+ +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + |> window(every: 5m) + |> mean() + |> duplicate(column: "_stop", as: "_time") +``` + +## Unwindow aggregate tables + +Use the `window()` function with the `every: inf` parameter to gather all points +into a single, infinite window. + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + |> window(every: 5m) + |> mean() + |> duplicate(column: "_stop", as: "_time") + |> window(every: inf) +``` + +Once ungrouped and combined into a single table, the aggregate data points will appear connected in your visualization. + +![Unwindowed aggregate data](/img/flux/windowed-aggregates-ungrouped.png) + +## Helper functions +This may seem like a lot of coding just to build a query that aggregates data, however going through the +process helps to understand how data changes "shape" as it is passed through each function. + +Flux provides (and allows you to create) "helper" functions that abstract many of these steps. +The same operation performed in this guide can be accomplished using +[`aggregateWindow()`](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow). + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + |> aggregateWindow(every: 5m, fn: mean) +``` + +## Congratulations! +You have now constructed a Flux query that uses Flux functions to transform your data. +There are many more ways to manipulate your data using both Flux's primitive functions +and your own custom functions, but this is a good introduction into the basic syntax and query structure. + +--- + +_For a deeper dive into windowing and aggregating data with example data output for each transformation, +view the [Window and aggregate data](/influxdb/v2.5/query-data/flux/window-aggregate) guide._ + +--- + +{{< page-nav prev="/influxdb/v2.5/query-data/get-started/query-influxdb/" >}} diff --git a/content/influxdb/v2.5/query-data/influxql/_index.md b/content/influxdb/v2.5/query-data/influxql/_index.md new file mode 100644 index 000000000..08994b3a9 --- /dev/null +++ b/content/influxdb/v2.5/query-data/influxql/_index.md @@ -0,0 +1,160 @@ +--- +title: Query data with InfluxQL +description: > + Use the [InfluxDB 1.x `/query` compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/query) + to query data in InfluxDB Cloud and InfluxDB OSS 2.5 with **InfluxQL**. +weight: 102 +influxdb/v2.5/tags: [influxql, query] +menu: + influxdb_2_5: + name: Query with InfluxQL + parent: Query data +cascade: + related: + - /influxdb/v2.5/reference/api/influxdb-1x/ + - /influxdb/v2.5/reference/api/influxdb-1x/query + - /influxdb/v2.5/reference/api/influxdb-1x/dbrp + - /influxdb/v2.5/tools/influxql-shell/ +--- + +In InfluxDB 1.x, data is stored in [databases](/{{< latest "influxdb" "v1" >}}/concepts/glossary/#database) +and [retention policies](/{{< latest "influxdb" "v1" >}}/concepts/glossary/#retention-policy-rp). +InfluxDB {{< current-version >}} combines and replaces database and retention +policies with [buckets](/influxdb/v2.5/reference/glossary/#bucket). +Because InfluxQL uses the 1.x data model, a database and retention policy combination +(DBRP) must be mapped to a bucket before it can be queried using InfluxQL. 
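+
+For example, to make a bucket queryable through InfluxQL as database `example-db`
+with retention policy `example-rp`, you can create a mapping with the `influx` CLI.
+The following is only a sketch; replace the bucket ID with your own bucket's ID.
+The steps below cover this in more detail:
+
+```sh
+# Map database "example-db" and retention policy "example-rp"
+# to an existing bucket, and make it the default mapping.
+influx v1 dbrp create \
+  --db example-db \
+  --rp example-rp \
+  --bucket-id 00ox00oXx000x0Xo \
+  --default
+```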
+
+{{% note %}}
+#### InfluxQL reference documentation
+For complete InfluxQL reference documentation, see
+[Influx Query Language in the latest InfluxDB 1.x documentation](/{{< latest "influxdb" "v1" >}}/query_language/).
+{{% /note %}}
+
+**To use InfluxQL to query bucket data, complete the following steps:**
+
+1. [Verify buckets have a mapping](#verify-buckets-have-a-mapping).
+2. [Create DBRP mappings for unmapped buckets](#create-dbrp-mappings-for-unmapped-buckets).
+3. [Query a mapped bucket with InfluxQL](#query-a-mapped-bucket-with-influxql).
+
+## Verify buckets have a mapping
+
+Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the [InfluxDB API](/influxdb/v2.5/reference/api/)
+to verify the buckets you want to query are mapped to a database and retention policy.
+_For examples, see [List DBRP mappings](/influxdb/v2.5/query-data/influxql/dbrp/#list-dbrp-mappings)._
+
+If you **do not find a DBRP mapping for a bucket**, [create a new DBRP mapping](#create-dbrp-mappings-for-unmapped-buckets) to
+map the unmapped bucket.
+
+## Create DBRP mappings for unmapped buckets
+Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the [InfluxDB API](/influxdb/v2.5/reference/api/)
+to manually create DBRP mappings for unmapped buckets.
+_For examples, see [Create DBRP mappings](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings)._
+
+## Query a mapped bucket with InfluxQL
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[InfluxQL Shell](#)
+[InfluxDB API](#)
+{{% /tabs %}}
+{{% tab-content %}}
+
+
+The [`influx` CLI](/influxdb/v2.5/tools/influx-cli/) provides an InfluxQL shell
+where you can execute InfluxQL queries in an interactive Read-Eval-Print-Loop (REPL).
+
+{{% note %}}
+If you haven't already, be sure to do the following:
+
+- [Download and install the `influx` CLI](/influxdb/v2.5/tools/influx-cli/#install-the-influx-cli)
+- [Configure your authentication credentials](/influxdb/v2.5/tools/influx-cli/#provide-required-authentication-credentials)
+{{% /note %}}
+
+Use the following command to start an InfluxQL shell:
+
+```sh
+influx v1 shell
+```
+
+Execute an InfluxQL query inside the InfluxQL shell.
+
+```sql
+> SELECT used_percent FROM "example-db"."example-rp"."example-measurement" WHERE host='host1'
+```
+
+For more information about using the InfluxQL shell, see
+[Use the InfluxQL shell](/influxdb/v2.5/tools/influxql-shell/).
+
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+
+The [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/) supports
+all InfluxDB 1.x client libraries and integrations in InfluxDB {{< current-version >}}.
+
+To query a mapped bucket with InfluxQL, use the [`/query` 1.x compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/query/).
+Include the following in your request:
+
+- **Request method:** `GET`
+- **Headers:**
+  - **Authorization:** _See [compatibility API authentication](/influxdb/v2.5/reference/api/influxdb-1x/#authentication)_
+- **Query parameters:**
+  - **db**: 1.x database to query
+  - **rp**: 1.x retention policy to query _(if no retention policy is specified, InfluxDB uses the default retention policy for the specified database)_
+  - **q**: URL-encoded InfluxQL query
+
+{{% api/url-encode-note %}}
+
+```sh
+curl --get http://localhost:8086/query?db=example-db \
+  --header "Authorization: Token YourAuthToken" \
+  --data-urlencode "q=SELECT used_percent FROM \"example-db\".\"example-rp\".\"example-measurement\" WHERE host='host1'"
+```
+
+By default, the `/query` compatibility endpoint returns results in **JSON**.
+To return results as **CSV**, include the `Accept: application/csv` header. + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## InfluxQL support + +InfluxDB {{< current-version >}} supports InfluxQL queries. +See supported and unsupported queries below. +To learn more about InfluxQL, see [Influx Query Language (InfluxQL)](/{{< latest "influxdb" "v1" >}}/query_language/). + +{{< flex >}} +{{< flex-content >}} +{{% note %}} +##### Supported InfluxQL queries + +- `DELETE`* +- `DROP MEASUREMENT`* +- `EXPLAIN ANALYZE` +- `SELECT` _(read-only)_ +- `SHOW DATABASES` +- `SHOW MEASUREMENTS` +- `SHOW TAG KEYS` +- `SHOW TAG VALUES` +- `SHOW FIELD KEYS` + +\* These commands delete data. +{{% /note %}} +{{< /flex-content >}} +{{< flex-content >}} +{{% warn %}} + +##### Unsupported InfluxQL queries + +- `SELECT INTO` +- `ALTER` +- `CREATE` +- `DROP` _(limited support)_ +- `GRANT` +- `KILL` +- `REVOKE` +{{% /warn %}} +{{< /flex-content >}} +{{< /flex >}} diff --git a/content/influxdb/v2.5/query-data/influxql/dbrp.md b/content/influxdb/v2.5/query-data/influxql/dbrp.md new file mode 100644 index 000000000..c814a0e38 --- /dev/null +++ b/content/influxdb/v2.5/query-data/influxql/dbrp.md @@ -0,0 +1,351 @@ +--- +title: Manage DBRP mappings +seotitle: Manage database and retention policy mappings +description: > + Create and manage database and retention policy (DBRP) mappings to use + InfluxQL to query InfluxDB buckets. +menu: + influxdb_2_5: + parent: Query with InfluxQL +weight: 202 +influxdb/v2.5/tags: [influxql, dbrp] +--- + +InfluxQL requires a database and retention policy (DBRP) combination to query data from. +In InfluxDB {{< current-version >}}, databases and retention policies have been +combined and replaced by InfluxDB [buckets](/influxdb/v2.5/reference/glossary/#bucket). +To query an InfluxDB {{< current-version >}} with InfluxQL, the specified DBRP +combination must be mapped to a bucket. + +- [Automatic DBRP mapping](#automatic-dbrp-mapping) +- {{% oss-only %}}[Virtual DBRP mappings](#virtual-dbrp-mappings){{% /oss-only %}} +- [Create DBRP mappings](#create-dbrp-mappings) +- [List DBRP mappings](#list-dbrp-mappings) +- [Update a DBRP mapping](#update-a-dbrp-mapping) +- [Delete a DBRP mapping](#delete-a-dbrp-mapping) + +## Automatic DBRP mapping + +InfluxDB {{< current-version >}} will automatically create DBRP mappings for you +during the following operations: + +- Writing to the [`/write` v1 compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/write/) +- {{% cloud-only %}}[Upgrading from InfluxDB 1.x to InfluxDB Cloud](/influxdb/v2.5/upgrade/v1-to-cloud/){{% /cloud-only %}} +- {{% oss-only %}}[Upgrading from InfluxDB 1.x to {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/){{% /oss-only %}} +- {{% oss-only %}}Creating a bucket ([virtual DBRPs](#virtual-dbrp-mappings)){{% /oss-only %}} + +For more information, see [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/). + +{{% oss-only %}} + +## Virtual DBRP mappings + +InfluxDB {{< current-version >}} provides "virtual" DBRP mappings for any bucket +that does not have an explicit DBRP mapping associated with it. +Virtual DBRP mappings use the bucket name to provide a DBRP mapping that can be +used without having to explicitly define a mapping. + +Virtual DBRP mappings are read-only. +To override a virtual DBRP mapping, [create an explicit mapping](#create-dbrp-mappings). 
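+
+For example, a minimal sketch (the bucket name is a placeholder) that creates a bucket whose name
+follows the `database/retention-policy` pattern and then lists the virtual mapping InfluxDB
+generates for it:
+
+```sh
+# Create a bucket named with a database/retention-policy pattern
+influx bucket create --name example-db/example-rp
+
+# List the read-only virtual DBRP mapping generated for the new bucket
+influx v1 dbrp list --db example-db
+```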
+ +For information about how virtual DBRP mappings are created, see +[Database and retention policy mapping – When creating a bucket](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/#when-creating-a-bucket). + +{{% /oss-only %}} + +## Create DBRP mappings + +Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the +[InfluxDB API](/influxdb/v2.5/reference/api/) to create DBRP mappings. + +{{% note %}} +#### A DBRP combination can only be mapped to a single bucket +Each unique DBRP combination can only be mapped to a single bucket. +If you map a DBRP combination that is already mapped to another bucket, +it will overwrite the existing DBRP mapping. +{{% /note %}} + +{{< tabs-wrapper >}} +{{% tabs %}} +[influx CLI](#) +[InfluxDB API](#) +{{% /tabs %}} +{{% tab-content %}} + +Use the [`influx v1 dbrp create` command](/influxdb/v2.5/reference/cli/influx/v1/dbrp/create/) +to map an unmapped bucket to a database and retention policy. +Include the following: + +{{< req type="key" >}} + +- {{< req "\*" >}} **org** and **token** to authenticate. We recommend setting your organization and token to your active InfluxDB connection configuration in the influx CLI, so you don't have to add these parameters to each command. To set up your active InfluxDB configuration, see [`influx config set`](/influxdb/v2.5/reference/cli/influx/config/set/). +- {{< req "\*" >}} **database name** to map +- {{< req "\*" >}} **retention policy** name to map +- {{< req "\*" >}} [Bucket ID](/influxdb/v2.5/organizations/buckets/view-buckets/#view-buckets-in-the-influxdb-ui) to map to +- **Default flag** to set the provided retention policy as the default retention policy for the database + +```sh +influx v1 dbrp create \ + --db example-db \ + --rp example-rp \ + --bucket-id 00oxo0oXx000x0Xo \ + --default +``` + +{{% /tab-content %}} +{{% tab-content %}} + +Use the [`/api/v2/dbrps` API endpoint](/influxdb/v2.5/api/#operation/PostDBRP) to create a new DBRP mapping. + + +{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps" method="POST" >}} + + +Include the following: + +- **Request method:** `POST` +- **Headers:** + - **Authorization:** `Token` schema with your InfluxDB [API token](/influxdb/v2.5/security/tokens/) + - **Content-type:** `application/json` +- **Request body:** JSON object with the following fields: + {{< req type="key" >}} + - {{< req "\*" >}} **bucketID:** [bucket ID](/influxdb/v2.5/organizations/buckets/view-buckets/) + - {{< req "\*" >}} **database:** database name + - **default:** set the provided retention policy as the default retention policy for the database + - {{< req "\*" >}} **org** or **orgID:** organization name or [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) + - {{< req "\*" >}} **retention_policy:** retention policy name + + +```sh +curl --request POST http://localhost:8086/api/v2/dbrps \ + --header "Authorization: Token YourAuthToken" \ + --header 'Content-type: application/json' \ + --data '{ + "bucketID": "00oxo0oXx000x0Xo", + "database": "example-db", + "default": true, + "orgID": "00oxo0oXx000x0Xo", + "retention_policy": "example-rp" + }' +``` + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## List DBRP mappings + +Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the [InfluxDB API](/influxdb/v2.5/reference/api/) +to list all DBRP mappings and verify the buckets you want to query are mapped +to a database and retention policy. 
+ +{{< tabs-wrapper >}} +{{% tabs %}} +[influx CLI](#) +[InfluxDB API](#) +{{% /tabs %}} +{{% tab-content %}} + +Use the [`influx v1 dbrp list` command](/influxdb/v2.5/reference/cli/influx/v1/dbrp/list/) to list DBRP mappings. + +{{% note %}} +The examples below assume that your organization and API token are +provided by the active [InfluxDB connection configuration](/influxdb/v2.5/reference/cli/influx/config/) in the `influx` CLI. +If not, include your organization (`--org`) and API token (`--token`) with each command. +{{% /note %}} + +##### View all DBRP mappings +```sh +influx v1 dbrp list +``` + +##### Filter DBRP mappings by database +```sh +influx v1 dbrp list --db example-db +``` + +##### Filter DBRP mappings by bucket ID +```sh +influx v1 dbrp list --bucket-id 00oxo0oXx000x0Xo +``` +{{% /tab-content %}} +{{% tab-content %}} +Use the [`/api/v2/dbrps` API endpoint](/influxdb/v2.5/api/#operation/GetDBRPs) to list DBRP mappings. + + +{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps" method="GET" >}} + + +Include the following: + +- **Request method:** `GET` +- **Headers:** + - **Authorization:** `Token` schema with your InfluxDB [API token](/influxdb/v2.5/security/tokens/) +- **Query parameters:** + {{< req type="key" >}} + - {{< req "\*" >}} **orgID:** [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) + - **bucketID:** [bucket ID](/influxdb/v2.5/organizations/buckets/view-buckets/) _(to list DBRP mappings for a specific bucket)_ + - **database:** database name _(to list DBRP mappings with a specific database name)_ + - **rp:** retention policy name _(to list DBRP mappings with a specific retention policy name)_ + - **id:** DBRP mapping ID _(to list a specific DBRP mapping)_ + +##### View all DBRP mappings +```sh +curl --request GET \ + http://localhost:8086/api/v2/dbrps?orgID=00oxo0oXx000x0Xo \ + --header "Authorization: Token YourAuthToken" +``` + +##### Filter DBRP mappings by database +```sh +curl --request GET \ + http://localhost:8086/api/v2/dbrps?orgID=00oxo0oXx000x0Xo&db=example-db \ + --header "Authorization: Token YourAuthToken" +``` + +##### Filter DBRP mappings by bucket ID +```sh +curl --request GET \ + https://cloud2.influxdata.com/api/v2/dbrps?organization_id=00oxo0oXx000x0Xo&bucketID=00oxo0oXx000x0Xo \ + --header "Authorization: Token YourAuthToken" +``` +{{% /tab-content %}} +{{% /tabs-wrapper %}} + +## Update a DBRP mapping + +Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the +[InfluxDB API](/influxdb/v2.5/reference/api/) to update a DBRP mapping. + +{{% oss-only %}} + +{{% note %}} +Virtual DBRP mappings cannot be updated. +To override a virtual DBRP mapping, [create an explicit mapping](#create-dbrp-mappings). +{{% /note %}} + +{{% /oss-only %}} + +{{< tabs-wrapper >}} +{{% tabs %}} +[influx CLI](#) +[InfluxDB API](#) +{{% /tabs %}} +{{% tab-content %}} + +Use the [`influx v1 dbrp update` command](/influxdb/v2.5/reference/cli/influx/v1/dbrp/update/) +to update a DBRP mapping. +Include the following: + +{{< req type="key" >}} + +- {{< req "\*" >}} **org** and **token** to authenticate. We recommend setting your organization and token to your active InfluxDB connection configuration in the influx CLI, so you don't have to add these parameters to each command. To set up your active InfluxDB configuration, see [`influx config set`](/influxdb/v2.5/reference/cli/influx/config/set/). 
+- {{< req "\*" >}} **DBRP mapping ID** to update
+- **Retention policy** name to update to
+- **Default flag** to set the retention policy as the default retention policy for the database
+
+##### Update the default retention policy
+```sh
+influx v1 dbrp update \
+  --id 00oxo0X0xx0XXoX0 \
+  --rp example-rp \
+  --default
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb/v2.5/api/#operation/GetDBRPs) to update DBRP mappings.
+
+
+{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps/{dbrpID}" method="PATCH" >}}
+
+
+Include the following:
+
+{{< req type="key" >}}
+
+- **Request method:** `PATCH`
+- **Headers:**
+  - {{< req "\*" >}} **Authorization:** `Token` schema with your InfluxDB [API token](/influxdb/v2.5/security/tokens/)
+- **Path parameters:**
+  - {{< req "\*" >}} **id:** DBRP mapping ID to update
+- **Query parameters:**
+  - {{< req "\*" >}} **orgID:** [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id)
+- **Request body (JSON):**
+  - **rp:** retention policy name to update to
+  - **default:** set the retention policy as the default retention policy for the database
+
+##### Update the default retention policy
+```sh
+curl --request PATCH \
+  http://localhost:8086/api/v2/dbrps/00oxo0X0xx0XXoX0?orgID=00oxo0oXx000x0Xo \
+  --header "Authorization: Token YourAuthToken" \
+  --data '{
+    "rp": "example-rp",
+    "default": true
+  }'
+```
{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+## Delete a DBRP mapping
+
+Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the
+[InfluxDB API](/influxdb/v2.5/reference/api/) to delete a DBRP mapping.
+
+{{% oss-only %}}
+
+{{% note %}}
+Virtual DBRP mappings cannot be deleted.
+{{% /note %}}
+
+{{% /oss-only %}}
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[influx CLI](#)
+[InfluxDB API](#)
+{{% /tabs %}}
+{{% tab-content %}}
+
+Use the [`influx v1 dbrp delete` command](/influxdb/v2.5/reference/cli/influx/v1/dbrp/delete/)
+to delete a DBRP mapping.
+Include the following:
+
+{{< req type="key" >}}
+
+- {{< req "\*" >}} **org** and **token** to authenticate. We recommend setting your organization and token to your active InfluxDB connection configuration in the influx CLI, so you don't have to add these parameters to each command. To set up your active InfluxDB configuration, see [`influx config set`](/influxdb/v2.5/reference/cli/influx/config/set/).
+- {{< req "\*" >}} **DBRP mapping ID** to delete
+
+```sh
+influx v1 dbrp delete --id 00oxo0X0xx0XXoX0
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+Use the [`/api/v2/dbrps/{dbrpID}` API endpoint](/influxdb/v2.5/api/#operation/DeleteDBRPID) to delete a DBRP mapping.
+
+
+{{< api-endpoint endpoint="http://localhost:8086/api/v2/dbrps/{dbrpID}" method="DELETE" >}}
+
+
+Include the following:
+
+{{< req type="key" >}}
+
+- **Request method:** `DELETE`
+- **Headers:**
+  - {{< req "\*" >}} **Authorization:** `Token` schema with your InfluxDB [API token](/influxdb/v2.5/security/tokens/)
+- **Path parameters:**
+  - {{< req "\*" >}} **id:** DBRP mapping ID to delete
+- **Query parameters:**
+  - {{< req "\*" >}} **orgID:** [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id)
+
+```sh
+curl --request DELETE \
+  http://localhost:8086/api/v2/dbrps/00oxo0X0xx0XXoX0?orgID=00oxo0oXx000x0Xo \
+  --header "Authorization: Token YourAuthToken"
+```
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
diff --git a/content/influxdb/v2.5/query-data/optimize-queries.md b/content/influxdb/v2.5/query-data/optimize-queries.md
new file mode 100644
index 000000000..d3b8fc792
--- /dev/null
+++ b/content/influxdb/v2.5/query-data/optimize-queries.md
@@ -0,0 +1,190 @@
+---
+title: Optimize Flux queries
+description: >
+  Optimize your Flux queries to reduce their memory and compute (CPU) requirements.
+weight: 104
+menu:
+  influxdb_2_5:
+    name: Optimize queries
+    parent: Query data
+influxdb/v2.5/tags: [query]
+---
+
+Optimize your Flux queries to reduce their memory and compute (CPU) requirements.
+
+- [Start queries with pushdowns](#start-queries-with-pushdowns)
+  - [Avoid processing filters inline](#avoid-processing-filters-inline)
+- [Avoid short window durations](#avoid-short-window-durations)
+- [Use "heavy" functions sparingly](#use-heavy-functions-sparingly)
+- [Use set() instead of map() when possible](#use-set-instead-of-map-when-possible)
+- [Balance time range and data precision](#balance-time-range-and-data-precision)
+- [Measure query performance with Flux profilers](#measure-query-performance-with-flux-profilers)
+
+## Start queries with pushdowns
+**Pushdowns** are functions or function combinations that push data operations
+to the underlying data source rather than operating on data in memory.
+Start queries with pushdowns to improve query performance.
+Once a non-pushdown function runs, Flux pulls data into memory and runs all
+subsequent operations there.
+
+#### Pushdown functions and function combinations
+Most pushdowns are supported when querying an InfluxDB {{< current-version keep=true >}} or InfluxDB Cloud data source.
+As shown in the following table, a handful of pushdowns are not supported in InfluxDB {{< current-version keep=true >}}.
+ +| Functions | InfluxDB {{< current-version keep=true >}} | InfluxDB Cloud | +| :----------------------------- | :----------------------------------------: | :------------------: | +| **count()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **duplicate()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **filter()** {{% req " \*" %}} | {{< icon "check" >}} | {{< icon "check" >}} | +| **fill()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **first()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **last()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **max()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **mean()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **min()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **range()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **rename()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **sum()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **window()** | {{< icon "check" >}} | {{< icon "check" >}} | +| _Function combinations_ | | | +| **group()** \|> **count()** | | {{< icon "check" >}} | +| **group()** \|> **first()** | | {{< icon "check" >}} | +| **group()** \|> **last()** | | {{< icon "check" >}} | +| **group()** \|> **max()** | | {{< icon "check" >}} | +| **group()** \|> **min()** | | {{< icon "check" >}} | +| **group()** \|> **sum()** | | {{< icon "check" >}} | +| **sort()** \|> **limit()** | | {{< icon "check" >}} | +| **window()** \|> **count()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **window()** \|> **first()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **window()** \|> **last()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **window()** \|> **max()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **window()** \|> **min()** | {{< icon "check" >}} | {{< icon "check" >}} | +| **window()** \|> **sum()** | {{< icon "check" >}} | {{< icon "check" >}} | + +{{% caption %}} +{{< req "\*" >}} **filter()** only pushes down when all parameter values are static. +See [Avoid processing filters inline](#avoid-processing-filters-inline). +{{% /caption %}} + +Use pushdown functions and function combinations at the beginning of your query. +Once a non-pushdown function runs, Flux pulls data into memory and runs all +subsequent operations there. + +##### Pushdown functions in use +```js +from(bucket: "example-bucket") + |> range(start: -1h) // + |> filter(fn: (r) => r.sensor == "abc123") // + |> group(columns: ["_field", "host"]) // Pushed to the data source + |> aggregateWindow(every: 5m, fn: max) // + |> filter(fn: (r) => r._value >= 90.0) // + + |> top(n: 10) // Run in memory +``` + +### Avoid processing filters inline +Avoid using mathematic operations or string manipulation inline to define data filters. +Processing filter values inline prevents `filter()` from pushing its operation down +to the underlying data source, so data returned by the +previous function loads into memory. +This often results in a significant performance hit. + +For example, the following query uses [dashboard variables](/influxdb/v2.5/visualize-data/variables/) +and string concatenation to define a region to filter by. +Because `filter()` uses string concatenation inline, it can't push its operation +to the underlying data source and loads all data returned from `range()` into memory. 
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r.region == v.provider + v.region)
+```
+
+To dynamically set filters and maintain the pushdown ability of the `filter()` function,
+use variables to define filter values outside of `filter()`:
+
+```js
+region = v.provider + v.region
+
+from(bucket: "example-bucket")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r.region == region)
+```
+
+## Avoid short window durations
+Windowing (grouping data based on time intervals) is commonly used to aggregate and downsample data.
+Increase performance by avoiding short window durations.
+More windows require more compute power to evaluate which window each row should be assigned to.
+Reasonable window durations depend on the total time range queried.
+
+## Use "heavy" functions sparingly
+The following functions use more memory or CPU than others.
+Consider their necessity in your data processing before using them:
+
+- [map()](/{{< latest "flux" >}}/stdlib/universe/map/)
+- [reduce()](/{{< latest "flux" >}}/stdlib/universe/reduce/)
+- [join()](/{{< latest "flux" >}}/stdlib/universe/join/)
+- [union()](/{{< latest "flux" >}}/stdlib/universe/union/)
+- [pivot()](/{{< latest "flux" >}}/stdlib/universe/pivot/)
+
+{{% note %}}
+We're continually optimizing Flux and this list may not represent its current state.
+{{% /note %}}
+
+## Use set() instead of map() when possible
+[`set()`](/{{< latest "flux" >}}/stdlib/universe/set/),
+[`experimental.set()`](/{{< latest "flux" >}}/stdlib/experimental/set/),
+and [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/)
+can each set column values in data; however, **set** functions have performance
+advantages over `map()`.
+
+Use the following guidelines to determine which to use:
+
+- If setting a column value to a predefined, static value, use `set()` or `experimental.set()`.
+- If dynamically setting a column value using **existing row data**, use `map()`.
+
+#### Set a column value to a static value
+The following queries are functionally the same, but using `set()` is more performant than using `map()`.
+
+```js
+data
+    |> map(fn: (r) => ({ r with foo: "bar" }))
+
+// Recommended
+data
+    |> set(key: "foo", value: "bar")
+```
+
+#### Dynamically set a column value using existing row data
+```js
+data
+    |> map(fn: (r) => ({ r with foo: r.bar }))
+```
+
+## Balance time range and data precision
+To ensure queries are performant, balance the time range and the precision of your data.
+For example, if you query data stored every second and request six months' worth of data,
+results would include ≈15.5 million points per series. Depending on the number of series returned after `filter()` ([cardinality](/influxdb/v2.5/reference/glossary/#series-cardinality)), this can quickly become many billions of points.
+Flux must store these points in memory to generate a response. Use [pushdowns](#pushdown-functions-and-function-combinations) to optimize how many points are stored in memory.
+
+To query data over large periods of time, create a task to [downsample data](/influxdb/v2.5/process-data/common-tasks/downsample-data/), and then query the downsampled data instead.
+
+## Measure query performance with Flux profilers
+Use the [Flux Profiler package](/{{< latest "flux" >}}/stdlib/profiler/)
+to measure query performance and append performance metrics to your query output.
+The following Flux profilers are available:
+
+- **query**: provides statistics about the execution of an entire Flux script.
+- **operator**: provides statistics about each operation in a query. + +Import the `profiler` package and enable profilers with the `profile.enabledProfilers` option. + +```js +import "profiler" + +option profiler.enabledProfilers = ["query", "operator"] + +// Query to profile +``` + +For more information about Flux profilers, see the [Flux Profiler package](/{{< latest "flux" >}}/stdlib/profiler/). diff --git a/content/influxdb/v2.5/reference/_index.md b/content/influxdb/v2.5/reference/_index.md new file mode 100644 index 000000000..85629285e --- /dev/null +++ b/content/influxdb/v2.5/reference/_index.md @@ -0,0 +1,8 @@ +--- +title: InfluxDB reference +description: > + Reference documentation for InfluxDB including release notes, API documentation, + tools, syntaxes, database internals, and more. +--- + +{{< children >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/api/_index.md b/content/influxdb/v2.5/reference/api/_index.md new file mode 100644 index 000000000..88b45dd97 --- /dev/null +++ b/content/influxdb/v2.5/reference/api/_index.md @@ -0,0 +1,30 @@ +--- +title: InfluxDB v2 API +description: > + The InfluxDB v2 API provides a programmatic interface for interactions with InfluxDB. + Access the InfluxDB API using the `/api/v2/` endpoint. +menu: influxdb_2_5_ref +weight: 3 +influxdb/v2.5/tags: [api] +aliases: + - /influxdb/v2.5/concepts/api/ +--- + +The InfluxDB v2 API provides a programmatic interface for interactions with InfluxDB. +Access the InfluxDB API using the `/api/v2/` endpoint. + +## InfluxDB v2 API documentation +InfluxDB OSS {{< current-version >}} API documentation + +#### View InfluxDB API documentation locally +InfluxDB API documentation is built into the `influxd` service and represents +the API specific to the current version of InfluxDB. +To view the API documentation locally, [start InfluxDB](/influxdb/v2.5/get-started/#start-influxdb) +and visit the `/docs` endpoint in a browser ([localhost:8086/docs](http://localhost:8086/docs)). + +## InfluxDB v1 compatibility API documentation +The InfluxDB v2 API includes [InfluxDB 1.x compatibility endpoints](/influxdb/v2.5/reference/api/influxdb-1x/) +that work with InfluxDB 1.x client libraries and third-party integrations like +[Grafana](https://grafana.com) and others. + +View full v1 compatibility API documentation diff --git a/content/influxdb/v2.5/reference/api/influxdb-1x/_index.md b/content/influxdb/v2.5/reference/api/influxdb-1x/_index.md new file mode 100644 index 000000000..8aae81c3a --- /dev/null +++ b/content/influxdb/v2.5/reference/api/influxdb-1x/_index.md @@ -0,0 +1,268 @@ +--- +title: InfluxDB 1.x compatibility API +description: > + The InfluxDB v2 API includes InfluxDB 1.x compatibility endpoints that work with + InfluxDB 1.x client libraries and third-party integrations like [Grafana](https://grafana.com) and others. +menu: + influxdb_2_5_ref: + name: 1.x compatibility + parent: InfluxDB v2 API +weight: 104 +influxdb/v2.5/tags: [influxql, query, write] +related: + - /influxdb/v2.5/query-data/influxql + - /influxdb/v2.5/upgrade/v1-to-v2/ +--- + +The InfluxDB v2 API includes InfluxDB 1.x compatibility endpoints that work with +InfluxDB 1.x client libraries and third-party integrations like [Grafana](https://grafana.com) and others. + +View full v1 compatibility API documentation + +## Authentication + +InfluxDB 1.x compatibility endpoints require all query and write requests to be authenticated with an +[API token](/influxdb/v2.5/security/tokens/) or 1.x-compatible +credentials. 
+ +* [Authenticate with the Token scheme](#authenticate-with-the-token-scheme) +* [Authenticate with a 1.x username and password scheme](#authenticate-with-a-username-and-password-scheme) + +### Authenticate with the Token scheme +Token authentication requires the following credential: + +- **token**: InfluxDB [API token](/influxdb/v2.5/security/tokens/) + +Use the `Authorization` header with the `Token` scheme to provide your token to InfluxDB. + +#### Syntax + +```sh +Authorization: Token INFLUX_API_TOKEN +``` + +#### Example + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v1-compat/auth/oss/token-auth.sh" %}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v1-compat/auth/oss/token-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +### Authenticate with a username and password scheme + +Use the following authentication schemes with clients that support the InfluxDB 1.x convention of `username` and `password` (that don't support the `Authorization: Token` scheme): + +- [Basic authentication](#basic-authentication) +- [Query string authentication](#query-string-authentication) + +#### Manage credentials + +{{% oss-only %}} + +Username and password schemes require the following credentials: +- **username**: 1.x username (this is separate from the UI login username) +- **password**: 1.x password or InfluxDB API token. + +{{% note %}} +#### Password or Token +If you have [set a password](/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade/#1x-compatible-authorizations) for the 1.x-compatible username, provide the 1.x-compatible password. +If you haven't set a password for the 1.x-compatible username, provide the InfluxDB [authentication token](/influxdb/v2.5/security/tokens/) as the password. +{{% /note %}} + +For information about creating and managing 1.x-compatible authorizations, see: + +- [`influx v1 auth` command](/influxdb/v2.5/reference/cli/influx/v1/auth/) +- [Manually upgrade – 1.x-compatible authorizations](/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade/#1x-compatible-authorizations) + +{{% /oss-only %}} + +{{% cloud-only %}} + +- **username**: InfluxDB Cloud username + (Use the email address you signed up with as your username, _e.g._ `exampleuser@influxdata.com`.) +- **password**: InfluxDB Cloud [API token](/influxdb/cloud/security/tokens/) + +{{% /cloud-only %}} + +#### Basic authentication + +Use the `Authorization` header with the `Basic` scheme to provide username and +password credentials to InfluxDB. 
+ +{{% api/v1-compat/basic-auth-syntax %}} + +##### Syntax + +{{% oss-only %}} + +```sh +Authorization: Basic INFLUX_USERNAME:INFLUX_PASSWORD_OR_TOKEN +``` + +{{% /oss-only %}} + + +{{% cloud-only %}} + +```sh +Authorization: Basic exampleuser@influxdata.com:INFLUX_API_TOKEN +``` + +{{% /cloud-only %}} + +##### Example + +{{% oss-only %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v1-compat/auth/oss/basic-auth.sh" %}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v1-compat/auth/oss/basic-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /oss-only %}} + + +{{% cloud-only %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.sh" %}} +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following: +- *`exampleuser@influxdata.com`*: the email address that you signed up with +- *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/cloud/reference/glossary/#token) + +{{% /cloud-only %}} + +#### Query string authentication +Use InfluxDB 1.x API parameters to provide credentials through the query string. + +{{% note %}} +##### Consider when using query string parameters + +- URL-encode query parameters that may contain whitespace or other special characters. +- Be aware of the [risks](https://owasp.org/www-community/vulnerabilities/Information_exposure_through_query_strings_in_url) when exposing sensitive data through URLs. 
+{{% /note %}} + +##### Syntax + +{{% oss-only %}} + +```sh + /query/?u=INFLUX_USERNAME&p=INFLUX_PASSWORD_OR_TOKEN + /write/?u=INFLUX_USERNAME&p=INFLUX_PASSWORD_OR_TOKEN + ``` + +{{% /oss-only %}} + +{{% cloud-only %}} + +```sh +/query/?u=INFLUX_USERNAME&p=INFLUX_API_TOKEN +/write/?u=INFLUX_USERNAME&p=INFLUX_API_TOKEN +``` + +{{% /cloud-only %}} + +##### Example + +{{% oss-only %}} +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{< get-shared-text "api/v1-compat/auth/oss/querystring-auth.sh" >}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{< get-shared-text "api/v1-compat/auth/oss/querystring-auth.js" >}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following: +- *`INFLUX_USERNAME`*: [InfluxDB 1.x username](#manage-credentials) +- *`INFLUX_PASSWORD_OR_TOKEN`*: [InfluxDB 1.x password or InfluxDB API token](#manage-credentials) + +{{% /oss-only %}} + +{{% cloud-only %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.sh" %}} +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following: +- *`exampleuser@influxdata.com`*: the email address that you signed up with +- *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/cloud/reference/glossary/#token) + +{{% /cloud-only %}} + +##### InfluxQL support + +The compatibility API supports InfluxQL, with the following caveats: + +- The `INTO` clause (e.g. `SELECT ... INTO ...`) is not supported. +- With the exception of [`DELETE`](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-series-with-delete) and + [`DROP MEASUREMENT`](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-measurements-with-drop-measurement) queries, which are still allowed, + InfluxQL database management commands are not supported. + +## Compatibility endpoints + +{{< children readmore=true >}} diff --git a/content/influxdb/v2.5/reference/api/influxdb-1x/dbrp.md b/content/influxdb/v2.5/reference/api/influxdb-1x/dbrp.md new file mode 100644 index 000000000..09e74363b --- /dev/null +++ b/content/influxdb/v2.5/reference/api/influxdb-1x/dbrp.md @@ -0,0 +1,125 @@ +--- +title: Database and retention policy mapping +description: > + The database and retention policy (DBRP) mapping service maps InfluxDB 1.x + database and retention policy combinations to InfluxDB Cloud and InfluxDB OSS 2.x buckets. +menu: + influxdb_2_5_ref: + name: DBRP mapping + parent: 1.x compatibility +weight: 302 +related: + - /influxdb/v2.5/reference/api/influxdb-1x/query + - /influxdb/v2.5/reference/api/influxdb-1x/write + - /influxdb/v2.5/api/#tag/DBRPs, InfluxDB v2 API /dbrps endpoint + - /influxdb/v2.5/query-data/influxql/ +--- + +The InfluxDB 1.x data model includes [databases](/influxdb/v1.8/concepts/glossary/#database) +and [retention policies](/influxdb/v1.8/concepts/glossary/#retention-policy-rp). +InfluxDB {{< current-version >}} replaces databases and retention policies with +[buckets](/influxdb/v2.5/reference/glossary/#bucket). 
+To support InfluxDB 1.x query and write patterns in InfluxDB {{< current-version >}}, +databases and retention policies are mapped to buckets using the +**database and retention policy (DBRP) mapping service**. + +The DBRP mapping service uses the **database** and **retention policy** specified in +[1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/) requests to route operations to a bucket. + +{{% cloud-only %}} + +{{% note %}} +To query data in InfluxQL that was written using the `api/v2/write` API, +you must **manually create a DBRP mapping** to map a bucket to a database and retention policy. +For more information, see [Create DBRP mappings](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings). +{{% /note %}} + +{{% /cloud-only %}} + +### Default retention policies + +A database can have multiple retention policies with one set as default. +If no retention policy is specified in a query or write request, InfluxDB uses +the default retention policy for the specified database. +Use the `influx` CLI or the InfluxDB API to set a retention policy as the +default retention policy for a database. + +{{% oss-only %}} + +### When creating a bucket + +When you [create a bucket](/influxdb/v2.5/organizations/buckets/create-bucket/), +InfluxDB {{< current-version >}} automatically creates a "virtual" DBRP mapping. +Virtual DBRP mappings are those that are created on your behalf. + +- **If your bucket name includes a forward slash (`/`)**, the virtual DBRP mapping + uses everything before the forward slash as the database name and everything + after the forward slash as the retention policy name. + If the database does not already have a default retention policy, the parsed + retention policy is set as the default. +- **If your bucket name does not include a forward slash (`/`)**, the virtual DBRP + mapping uses the bucket name as the database and `autogen` as the retention + policy. The `autogen` retention policy is set as the default retention policy. + +{{% /oss-only %}} + +### When writing data + +{{% oss-only %}} + +When writing data using the +[`/write` compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/write/), +the DBRP mapping service uses the database and retention policy specified +in the request to write the data to the appropriate bucket. + +{{% /oss-only %}} + +{{% cloud-only %}} + +When writing data using the +[`/write` compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/write/), +the DBRP mapping service checks for a bucket mapped to the database and retention policy: + +- If a mapped bucket is found, data is written to the bucket. +- If an unmapped bucket with a name matching: + - **database/retention policy** exists, a DBRP mapping is added to the bucket, + and data is written to the bucket. + - **database** exists (without a specified retention policy), the default + database retention policy is used, a DBRP mapping is added to the bucket, + and data is written to the bucket. + +{{% /cloud-only %}} + +### When querying data + +{{% oss-only %}} + +When querying data from InfluxDB {{< current-version >}} +using the [`/query` compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/query/), +the DBRP mapping service uses the database and retention policy specified in the +request to query data from the appropriate bucket. +If no retention policy is specified, the database's default retention policy is used. 
+ +{{% /oss-only %}} + +{{% cloud-only %}} + +When querying data from InfluxDB {{< current-version >}} +using the [`/query` compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/query/), +the DBRP mapping service checks for the specified database and retention policy +(if no retention policy is specified, the database's default retention policy is used): + +- If a mapped bucket exists, data is queried from the mapped bucket. +- If no mapped bucket exists, InfluxDB returns an error. + See how to [Create DBRP mappings](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings). + +_For more information on the DBRP mapping API, see the [`/api/v2/dbrps` endpoint documentation](/influxdb/v2.5/api/#tag/DBRPs)._ + +{{% /cloud-only %}} + +{{% note %}} +#### A DBRP combination can only be mapped to a single bucket +Each unique DBRP combination can only be mapped to a single bucket. +If you map a DBRP combination that is already mapped to another bucket, +it will overwrite the existing DBRP mapping. +{{% /note %}} diff --git a/content/influxdb/v2.5/reference/api/influxdb-1x/query.md b/content/influxdb/v2.5/reference/api/influxdb-1x/query.md new file mode 100644 index 000000000..c3f2b135c --- /dev/null +++ b/content/influxdb/v2.5/reference/api/influxdb-1x/query.md @@ -0,0 +1,177 @@ +--- +title: /query 1.x compatibility API +list_title: /query +description: > + The `/query` 1.x compatibility endpoint queries InfluxDB Cloud and InfluxDB OSS 2.x using **InfluxQL**. +menu: + influxdb_2_5_ref: + name: /query + parent: 1.x compatibility +weight: 301 +influxdb/v2.5/tags: [influxql, query] +list_code_example: | +
+  GET http://localhost:8086/query
+  
+related: + - /influxdb/v2.5/query-data/influxql +--- + +The `/query` 1.x compatibility endpoint queries InfluxDB {{< current-version >}} using **InfluxQL**. +Use the `GET` request method to query data from the `/query` endpoint. + +
+GET http://localhost:8086/query
+
+ +The `/query` compatibility endpoint uses the **database** and **retention policy** +specified in the query request to map the request to an InfluxDB bucket. +For more information, see [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp). + +{{% cloud-only %}} + +{{% note %}} +If you have an existing bucket that doesn't follow the **database/retention-policy** naming convention, +you **must** [manually create a database and retention policy mapping](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings) +to query that bucket with the `/query` compatibility API. +{{% /note %}} + +{{% /cloud-only %}} + +## Authentication + +Use one of the following authentication methods: +* **token authentication** +* **basic authentication with username and password** +* **query string authentication with username and password** + +_For more information, see [Authentication](/influxdb/v2.5/reference/api/influxdb-1x/#authentication)._ + +## Query string parameters + +### u +(Optional) The 1.x **username** to authenticate the request. +_See [query string authentication](/influxdb/v2.5/reference/api/influxdb-1x/#query-string-authentication)._ + +### p +(Optional) The 1.x **password** to authenticate the request. +_See [query string authentication](/influxdb/v2.5/reference/api/influxdb-1x/#query-string-authentication)._ + +### db +({{< req >}}) The **database** to query data from. +This is mapped to an InfluxDB [bucket](/influxdb/v2.5/reference/glossary/#bucket). +_See [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)._ + +### rp +The **retention policy** to query data from. +This is mapped to an InfluxDB [bucket](/influxdb/v2.5/reference/glossary/#bucket). +_See [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)._ + +### q +({{< req >}}) The **InfluxQL** query to execute. +To execute multiple queries, delimit queries with a semicolon (`;`). + +### epoch +Return results with [Unix timestamps](/influxdb/v2.5/reference/glossary/#unix-timestamp) +(also known as epoch timestamps) in the specified precision instead of +[RFC3339 timestamps](/influxdb/v2.5/reference/glossary/#rfc3339-timestamp) with nanosecond precision. 
+The following precisions are available: + +- `ns` - nanoseconds +- `u` or `µ` - microseconds +- `ms` - milliseconds +- `s` - seconds +- `m` - minutes +- `h` - hours + +## Query examples + +- [Query using basic authentication](#query-using-basic-authentication) +- [Query a non-default retention policy](#query-a-non-default-retention-policy) +- [Execute multiple queries](#execute-multiple-queries) +- [Return query results with millisecond Unix timestamps](#return-query-results-with-millisecond-unix-timestamps) +- [Execute InfluxQL queries from a file](#execute-influxql-queries-from-a-file) + +##### Query using basic authentication + +{{% oss-only %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v1-compat/auth/oss/basic-auth.sh" %}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v1-compat/auth/oss/basic-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /oss-only %}} + +{{% cloud-only %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.sh" %}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.js" %}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + + +{{% /cloud-only %}} + +##### Query a non-default retention policy +```sh +curl --get http://localhost:8086/query \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-urlencode "db=mydb" \ + --data-urlencode "rp=customrp" \ + --data-urlencode "q=SELECT used_percent FROM mem WHERE host=host1" +``` + +##### Execute multiple queries +```sh +curl --get http://localhost:8086/query \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-urlencode "db=mydb" \ + --data-urlencode "q=SELECT * FROM mem WHERE host=host1;SELECT mean(used_percent) FROM mem WHERE host=host1 GROUP BY time(10m)" +``` + +##### Return query results with millisecond Unix timestamps +```sh +curl --get http://localhost:8086/query \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-urlencode "db=mydb" \ + --data-urlencode "rp=myrp" \ + --data-urlencode "q=SELECT used_percent FROM mem WHERE host=host1" \ + --data-urlencode "epoch=ms" +``` + +##### Execute InfluxQL queries from a file +```sh +curl --get http://localhost:8086/query \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-urlencode "db=mydb" \ + --data-urlencode "q@path/to/influxql.txt" \ + --data-urlencode "async=true" +``` + +Replace the following: +- *`INFLUX_API_TOKEN`*: InfluxDB API token diff --git a/content/influxdb/v2.5/reference/api/influxdb-1x/write.md b/content/influxdb/v2.5/reference/api/influxdb-1x/write.md new file mode 100644 index 000000000..e06559634 --- /dev/null +++ b/content/influxdb/v2.5/reference/api/influxdb-1x/write.md @@ -0,0 +1,198 @@ +--- +title: /write 1.x compatibility API +list_title: /write +description: > + The `/write` 1.x compatibility endpoint writes data to InfluxDB Cloud and + InfluxDB OSS 2.x using patterns from the InfluxDB 1.x `/write` API endpoint. +menu: + influxdb_2_5_ref: + name: /write + parent: 1.x compatibility +weight: 301 +influxdb/v2.5/tags: [write] +list_code_example: | +
+  POST http://localhost:8086/write
+  
+related: + - /influxdb/v2.5/reference/syntax/line-protocol +--- + +The `/write` 1.x compatibility endpoint writes data to InfluxDB Cloud and InfluxDB OSS {{< current-version >}} +using patterns from the InfluxDB 1.x `/write` API endpoint. +Use the `POST` request method to write [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) +to the `/write` endpoint. + +
+POST http://localhost:8086/write
+
+ +{{% cloud-only %}} + +{{% note %}} +If you have an existing bucket that doesn't follow the **database/retention-policy** naming convention, +you **must** [manually create a database and retention policy mapping](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings) +to write data to that bucket with the `/write` compatibility API. +{{% /note %}} + +{{% /cloud-only %}} + +## Authentication + +{{% oss-only %}} + +Use one of the following authentication methods: +* **token authentication** +* **basic authentication with username and password** +* **query string authentication with username and password** + +_For more information, see [Authentication](/influxdb/v2.5/reference/api/influxdb-1x/#authentication)._ + +{{% /oss-only %}} + +{{% cloud-only %}} + +{{% api/v1-compat/cloud/authentication %}} + +{{% /cloud-only %}} + +## Request body +Include your line protocol in the request body. +**Binary encode** the line protocol to prevent unintended formatting. +The examples [below](#write-examples) use the curl `--data-binary` flag to binary +encode the line protocol. + +## Query string parameters + +{{% oss-only %}} + +### u +(Optional) The 1.x **username** to authenticate the request. +_See [query string authentication](/influxdb/v2.5/reference/api/influxdb-1x/#query-string-authentication)._ + +### p +(Optional) The 1.x **password** to authenticate the request. +_See [query string authentication](/influxdb/v2.5/reference/api/influxdb-1x/#query-string-authentication)._ + +{{% /oss-only %}} + +{{% cloud-only %}} + +### u +(Optional) The InfluxDB Cloud **username** to authenticate the request. +_See [query string authentication](/influxdb/cloud/reference/api/influxdb-1x/#query-string-authentication)._ + +### p +(Optional) The InfluxDB Cloud **API token** to authenticate the request. +_See [query string authentication](/influxdb/cloud/reference/api/influxdb-1x/#query-string-authentication)._ + +{{% /cloud-only %}} + +### db +({{< req >}}) The **database** to write data to. +This is mapped to an InfluxDB [bucket](/influxdb/v2.5/reference/glossary/#bucket). +_See [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)._ + +### rp +The **retention policy** to write data to. +This is mapped to an InfluxDB [bucket](/influxdb/v2.5/reference/glossary/#bucket). +_See [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)._ + +### precision +The precision of [Unix timestamps](/influxdb/v2.5/reference/glossary/#unix-timestamp) in the line protocol. +Default is nanosconds (`ns`). 
+The following precisions are available: + +- `ns` - nanoseconds +- `u` or `µ` - microseconds +- `ms` - milliseconds +- `s` - seconds +- `m` - minutes +- `h` - hours + +## Write examples + +- [Write data using basic authentication](#write-data-using-basic-authentication) +- [Write data to a non-default retention policy](#write-data-to-a-non-default-retention-policy) +- [Write multiple lines of line protocol](#write-multiple-lines-of-line-protocol) +- [Write data with millisecond Unix timestamps](#write-data-with-millisecond-unix-timestamps) +- [Use curl to write data from a file](#use-curl-to-write-data-from-a-file) + +##### Write data using basic authentication + +{{% oss-only %}} + +```sh +curl --request POST http://localhost:8086/write?db=mydb \ + --user "INFLUX_USERNAME:INFLUX_PASSWORD_OR_TOKEN" \ + --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000" +``` + +{{% /oss-only %}} + +{{% cloud-only %}} + +```sh +curl --request POST https://cloud2.influxdata.com/write?db=mydb \ + --user "exampleuser@influxdata.com:INFLUX_API_TOKEN" \ + --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000" +``` + +{{% /cloud-only %}} + +##### Write data using token authentication +```sh +curl --request POST http://localhost:8086/write?db=mydb \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000" +``` + +##### Write data to a non-default retention policy + +```sh +curl --request POST http://localhost:8086/write?db=mydb&rp=customrp \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000" +``` + + + +##### Write multiple lines of line protocol +```sh +curl --request POST http://localhost:8086/write?db=mydb \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000 +measurement,host=host2 field1=14i,field2=12.7 1577836800000000000 +measurement,host=host3 field1=5i,field2=6.8 1577836800000000000" +``` + +##### Write data with millisecond Unix timestamps +```sh +curl --request POST http://localhost:8086/write?db=mydb&precision=ms \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000" +``` + +##### Use curl to write data from a file +```sh +curl --request POST http://localhost:8086/write?db=mydb \ + --header "Authorization: Token INFLUX_API_TOKEN" \ + --data-binary @path/to/line-protocol.txt +``` + +{{% oss-only %}} + +Replace the following: +- *`INFLUX_USERNAME`*: [InfluxDB 1.x username](/influxdb/v2.5/reference/api/influxdb-1x/#manage-credentials) +- *`INFLUX_PASSWORD_OR_TOKEN`*: [InfluxDB 1.x password or InfluxDB API token](/influxdb/v2.5/reference/api/influxdb-1x/#manage-credentials) +- *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/v2.5/reference/glossary/#token) + +{{% /oss-only %}} + +{{% cloud-only %}} + +Replace the following: +- *`exampleuser@influxdata.com`*: the email address that you signed up with +- *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/cloud/reference/glossary/#token) + +{{% /cloud-only %}} diff --git a/content/influxdb/v2.5/reference/cli/_index.md b/content/influxdb/v2.5/reference/cli/_index.md new file mode 100644 index 000000000..f00d66f2e --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/_index.md @@ -0,0 +1,18 @@ +--- +title: Command line tools +seotitle: Command line tools for managing InfluxDB 
+description: > + InfluxDB provides command line tools designed to aid in managing and working + with InfluxDB from the command line. +influxdb/v2.5/tags: [cli] +menu: + influxdb_2_5_ref: + name: Command line tools +weight: 4 +--- + +InfluxDB provides command line tools designed to aid in managing and working +with InfluxDB from the command line. +The following command line interfaces (CLIs) are available: + +{{< children >}} diff --git a/content/influxdb/v2.5/reference/cli/influx/_index.md b/content/influxdb/v2.5/reference/cli/influx/_index.md new file mode 100644 index 000000000..7a42f9a62 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/_index.md @@ -0,0 +1,163 @@ +--- +title: influx - InfluxDB command line interface +seotitle: influx - InfluxDB command line interface +description: > + The `influx` CLI includes commands to manage many aspects of InfluxDB, + including buckets, organizations, users, tasks, etc. +menu: + influxdb_2_5_ref: + name: influx + parent: Command line tools +weight: 101 +influxdb/v2.5/tags: [cli] +related: + - /influxdb/v2.5/tools/influx-cli/ +--- + +The `influx` command line interface (CLI) includes commands to manage many aspects of InfluxDB, +including buckets, organizations, users, tasks, etc. + +{{% oss-only %}} + +{{% note %}} +#### InfluxDB OSS and influx CLI versions +Beginning with **InfluxDB 2.1**, the `influx` CLI is packaged and versioned separately +from InfluxDB. +InfluxDB and `influx` CLI versions may differ, but compatibility is noted for each command. +{{% /note %}} + +{{% /oss-only %}} + +## Download and install the influx CLI +Download and install the influx CLI + +### Provide required authentication credentials +To avoid having to pass your InfluxDB **host**, **API token**, and **organization** +with each command, store them in an `influx` CLI configuration (config). +`influx` commands that require these credentials automatically retrieve these +credentials from the active config. + +Use the [`influx config create` command](/influxdb/v2.5/reference/cli/influx/config/create/) +to create an `influx` CLI config and set it as active: + +```sh +influx config create --config-name \ + --host-url http://localhost:8086 \ + --org \ + --token \ + --active +``` + +For more information about managing CLI configurations, see the +[`influx config` documentation](/influxdb/v2.5/reference/cli/influx/config/). 
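+
+For example, assuming a configuration named `local-config` was created with the command above,
+the following is a quick sketch of listing your CLI configurations and switching between them:
+
+```sh
+# List all influx CLI connection configurations
+influx config ls
+
+# Switch the active configuration to "local-config"
+influx config local-config
+```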
+ +## Usage + +``` +influx [flags] +influx [command] +``` + +## Commands + +| Command | Description | +| :------------------------------------------------------------------ | :------------------------------------------------------------------------- | +| [apply](/influxdb/v2.5/reference/cli/influx/apply/) | Apply an InfluxDB template | +| [auth](/influxdb/v2.5/reference/cli/influx/auth/) | API token management commands | +| [backup](/influxdb/v2.5/reference/cli/influx/backup/) | Back up data _(InfluxDB OSS only)_ | +| [bucket](/influxdb/v2.5/reference/cli/influx/bucket/) | Bucket management commands | +| [bucket-schema](/influxdb/v2.5/reference/cli/influx/bucket-schema/) | Manage InfluxDB bucket schemas _(InfluxDB Cloud only)_ | +| [completion](/influxdb/v2.5/reference/cli/influx/completion/) | Generate completion scripts | +| [config](/influxdb/v2.5/reference/cli/influx/config/) | Configuration management commands | +| [dashboards](/influxdb/v2.5/reference/cli/influx/dashboards/) | List dashboards | +| [delete](/influxdb/v2.5/reference/cli/influx/delete/) | Delete points from InfluxDB | +| [export](/influxdb/v2.5/reference/cli/influx/export/) | Export resources as a template | +| [help](/influxdb/v2.5/reference/cli/influx/help/) | Help about any command | +| [org](/influxdb/v2.5/reference/cli/influx/org/) | Organization management commands | +| [ping](/influxdb/v2.5/reference/cli/influx/ping/) | Check the InfluxDB `/health` endpoint | +| [query](/influxdb/v2.5/reference/cli/influx/query/) | Execute a Flux query | +| [restore](/influxdb/v2.5/reference/cli/influx/restore/) | Restore backup data _(InfluxDB OSS only)_ | +| [scripts](/influxdb/v2.5/reference/cli/influx/scripts) | Scripts management commands _(InfluxDB Cloud only)_ | +| [secret](/influxdb/v2.5/reference/cli/influx/secret/) | Manage secrets | +| [setup](/influxdb/v2.5/reference/cli/influx/setup/) | Create default username, password, org, bucket, etc. _(InfluxDB OSS only)_ | +| [stacks](/influxdb/v2.5/reference/cli/influx/stacks/) | Manage InfluxDB stacks | +| [task](/influxdb/v2.5/reference/cli/influx/task/) | Task management commands | +| [telegrafs](/influxdb/v2.5/reference/cli/influx/telegrafs/) | Telegraf configuration management commands | +| [template](/influxdb/v2.5/reference/cli/influx/template/) | Summarize and validate an InfluxDB template | +| [user](/influxdb/v2.5/reference/cli/influx/user/) | User management commands | +| [v1](/influxdb/v2.5/reference/cli/influx/v1/) | Work with the v1 compatibility API | +| [version](/influxdb/v2.5/reference/cli/influx/version/) | Print the influx CLI version | +| [write](/influxdb/v2.5/reference/cli/influx/write/) | Write points to InfluxDB | + +## Flags + +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `influx` command | + +### Flag patterns and conventions +The `influx` CLI uses the following patterns and conventions: + +- [Mapped environment variables](#mapped-environment-variables) +- [Shorthand and longhand flags](#shorthand-and-longhand-flags) +- [Flag input types](#flag-input-types) + +#### Mapped environment variables +`influx` CLI flags mapped to environment variables are listed in the **Mapped to** +column of the Flags table in each command documentation. +Mapped flags inherit the value of the environment variable. +To override environment variables, set the flag explicitly in your command. 
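+
+For example, the following sketch uses the `INFLUX_ORG` variable and the `--org`
+flag documented on this page (the organization names are placeholders) to show a
+flag overriding its mapped environment variable:
+
+```sh
+# Set a default organization for influx commands
+export INFLUX_ORG=example-org
+
+# Uses the INFLUX_ORG value (example-org)
+influx bucket list
+
+# The explicit flag overrides INFLUX_ORG for this command only
+influx bucket list --org another-org
+```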
+
+{{< expand-wrapper >}}
+{{% expand "View mapped environment variables" %}}
+
+{{% note %}}
+Some `influx` CLI commands may not support all mapped environment variables.
+For more information about what mapped environment variables each command supports,
+see the command documentation.
+{{% /note %}}
+
+| Environment variable      | Description                                                            |
+| :------------------------ | :--------------------------------------------------------------------- |
+| `INFLUX_ACTIVE_CONFIG`    | CLI configuration to use for commands                                  |
+| `INFLUX_BUCKET_ID`        | Bucket ID                                                              |
+| `INFLUX_BUCKET_NAME`      | Bucket name                                                            |
+| `INFLUX_CONFIGS_PATH`     | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`)  |
+| `INFLUX_HIDE_HEADERS`     | Hide table headers in command output (default `false`)                 |
+| `INFLUX_HOST`             | HTTP address of InfluxDB (default `http://localhost:8086`)             |
+| `INFLUX_NAME`             | InfluxDB username                                                      |
+| `INFLUX_ORG`              | InfluxDB organization name                                             |
+| `INFLUX_ORG_DESCRIPTION`  | Organization description                                               |
+| `INFLUX_ORG_ID`           | InfluxDB organization ID                                               |
+| `INFLUX_OUTPUT_JSON`      | Return command output as JSON                                          |
+| `INFLUX_SKIP_VERIFY`      | Skip TLS certificate verification                                      |
+| `INFLUX_TOKEN`            | InfluxDB API token                                                     |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+#### Shorthand and longhand flags
+Many `influx` CLI flags support both shorthand and longhand forms.
+
+- **shorthand:** a shorthand flag begins with a single hyphen followed by a single letter (for example: `-c`).
+- **longhand:** a longhand flag starts with two hyphens followed by a multi-letter,
+  hyphen-separated flag name (for example: `--active-config`).
+
+Commands can use both shorthand and longhand flags in a single execution.
+
+#### Flag input types
+`influx` CLI flag input types are listed in the table of flags for each command.
+Flags support the following input types:
+
+##### string
+Text string. The flag can be used **only once** per command execution.
+
+##### stringArray
+Single text string. The flag can be used **multiple times** per command execution.
+
+##### integer
+Sequence of digits representing an integer value.
+
+##### duration
+Length of time represented by an integer and a duration unit
+(`1ns`, `1us`, `1µs`, `1ms`, `1s`, `1m`, `1h`, `1d`, `1w`).
diff --git a/content/influxdb/v2.5/reference/cli/influx/apply/_index.md b/content/influxdb/v2.5/reference/cli/influx/apply/_index.md
new file mode 100644
index 000000000..c314098a5
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/apply/_index.md
@@ -0,0 +1,132 @@
+---
+title: influx apply
+description: The `influx apply` command applies InfluxDB templates.
+menu:
+  influxdb_2_5_ref:
+    name: influx apply
+    parent: influx
+weight: 101
+aliases:
+  - /influxdb/v2.5/reference/cli/influx/pkg/
+influxdb/v2.5/tags: [templates]
+related:
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+---
+
+The `influx apply` command applies InfluxDB templates.
+_For information about finding and using InfluxDB templates, see +[Use InfluxDB templates](/influxdb/v2.5/influxdb-templates/use/)._ + +## Usage +``` +influx apply [flags] +``` + +## Flags +| Flag | | Description | Input Type | {{< cli/mapped >}} | +|:-----|:--------------------------|:--------------------------------------------------------------------------------------------|:-----------|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--disable-color` | Disable color in output | | | +| | `--disable-table-borders` | Disable table borders | | | +| `-e` | `--encoding` | Encoding of the input stream | string | | +| | `--env-ref` | Environment references to provide with the template (format: `--env-ref=REF_KEY=REF_VALUE`) | string | | +| `-f` | `--file` | Path to template file (supports HTTP(S) URLs or file paths) | string | | +| | `--filter` | Resources to skip when applying the template (filter by `kind` or `resource`) | string | | +| | `--force` | Ignore warnings about destructive changes | | | +| `-h` | `--help` | Help for the `apply` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name that owns the bucket (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID that owns the bucket (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-q` | `--quiet` | Disable output printing | | | +| `-R` | `--recurse` | Recurse through files in the directory specified in `-f`, `--file` | | | +| | `--secret` | Secrets to provide with the template (format: `--secret=SECRET_KEY=SECRET_VALUE`) | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--stack-id` | Stack ID to associate when applying the template | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples: how to apply a template or stack + +{{< cli/influx-creds-note >}} + +- [from a file](#apply-a-template-from-a-file) +- [from a URL](#apply-a-template-from-a-url) +- [from a stack that has associated templates](#apply-a-stack-that-has-associated-templates) +- [a template to a stack](#apply-a-template-to-a-stack) +- [multiple template files together](#apply-multiple-template-files-together) +- [a template from stdin](#apply-a-template-from-stdin) +- [all templates in a directory](#apply-all-templates-in-a-directory) +- [recursively from a directory](#recursively-apply-templates-from-a-directory) +- [from multiple sources](#apply-templates-from-multiple-sources) +- [skip resources](#apply-a-template-but-skip-resources) + +##### Apply a template from a file +```sh +influx apply --file path/to/template.json +``` + +##### Apply a template from a URL +```sh +influx apply --file https://raw.githubusercontent.com/influxdata/community-templates/master/docker/docker.yml +``` + +##### Apply a stack that has associated templates +To apply all templates associated with a stack ID to a new stack: + +```sh +influx apply --stack-id $STACK_ID +``` + +##### Apply a template to a stack +```sh +influx apply --file path/to/template.json --stack-id $STACK_ID +``` + +##### Apply multiple template files 
together
+```sh
+influx apply \
+  --file path/to/template_1.json \
+  --file path/to/template_2.yml
+```
+
+##### Apply a template from stdin
+```sh
+cat template.json | influx apply --encoding json
+```
+
+##### Apply all templates in a directory
+```sh
+influx apply --file path/to/template_directory
+```
+
+##### Recursively apply templates from a directory
+```sh
+influx apply --recurse --file path/to/template_directory
+```
+
+##### Apply templates from multiple sources
+```sh
+influx apply \
+  --file path/to/template.yml \
+  --file path/to/templates_directory \
+  --file https://example.com/template.json
+```
+
+##### Apply a template, but skip resources
+```sh
+# The following example skips all buckets and the dashboard
+# whose metadata.name field matches "example-dashboard".

+# Filter format:
+# --filter=kind=Bucket
+# --filter=resource=Label:$Label_TMPL_NAME
+
+influx apply \
+  --file path/to/template.yml \
+  --filter kind=Bucket \
+  --filter resource=Dashboard:example-dashboard
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/auth/_index.md b/content/influxdb/v2.5/reference/cli/influx/auth/_index.md
new file mode 100644
index 000000000..168272c0d
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/auth/_index.md
@@ -0,0 +1,41 @@
+---
+title: influx auth
+description: The `influx auth` command and its subcommands manage API tokens in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx auth
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [authentication]
+cascade:
+  related:
+    - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+    - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+  metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+---
+
+The `influx auth` command and its subcommands manage API tokens in InfluxDB.
+
+## Usage
+```
+influx auth [flags]
+influx auth [command]
+```
+
+#### Command aliases
+`auth`, `authorization`
+
+## Subcommands
+| Subcommand | Description |
+|:---------- |:----------- |
+| [active](/influxdb/v2.5/reference/cli/influx/auth/active) | Activate API token |
+| [create](/influxdb/v2.5/reference/cli/influx/auth/create) | Create API token |
+| [delete](/influxdb/v2.5/reference/cli/influx/auth/delete) | Delete API token |
+| [list](/influxdb/v2.5/reference/cli/influx/auth/list) | List API tokens |
+| [inactive](/influxdb/v2.5/reference/cli/influx/auth/inactive) | Inactivate API token |
+
+## Flags
+| Flag | | Description |
+|:---- |:--- |:----------- |
+| `-h` | `--help` | Help for the `auth` command |
diff --git a/content/influxdb/v2.5/reference/cli/influx/auth/active.md b/content/influxdb/v2.5/reference/cli/influx/auth/active.md
new file mode 100644
index 000000000..aa29bc0ec
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/auth/active.md
@@ -0,0 +1,40 @@
+---
+title: influx auth active
+description: The `influx auth active` command sets an API token to active in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx auth active
+    parent: influx auth
+weight: 201
+---
+
+The `influx auth active` command activates an API token.
+Only active tokens authorize access to InfluxDB.
+
+## Usage
+```
+influx auth active [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `active` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. | string | |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| `-i` | `--id` | ({{< req >}}) API token ID | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Example
+
+{{< cli/influx-creds-note >}}
+
+##### Activate an API token
+```sh
+influx auth active --id 06c86c40a9f36000
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/auth/create.md b/content/influxdb/v2.5/reference/cli/influx/auth/create.md
new file mode 100644
index 000000000..4840ec193
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/auth/create.md
@@ -0,0 +1,158 @@
+---
+title: influx auth create
+description: The `influx auth create` command creates an API token in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx auth create
+    parent: influx auth
+weight: 201
+updated_in: CLI 2.5.0
+---
+
+The `influx auth create` command creates an API token in InfluxDB.
+
+{{% warn %}}
+**Issue resolved**: influx CLI 2.4 prevented you from creating an **all-access** or **operator** token with the `influx auth create` command. This issue is resolved in the influx CLI 2.5 release. Please [upgrade to the latest version](/influxdb/latest/tools/influx-cli/) of the influx CLI.
+{{% /warn %}}
+
+## Usage
+```
+influx auth create [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:--------------------------------|:----------------------------------------------------------------------|:-----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--all-access` | Grants all permissions in a single organization | | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-d` | `--description` | API token description | string | |
+| `-h` | `--help` | Help for the `create` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--operator` | _(InfluxDB OSS only)_ Grants all permissions in all organizations | string | |
+| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
+| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
+| | `--read-bucket` | Grant permission to read a specified bucket ID | stringArray | |
+| | `--read-buckets` | Grant permission to read **all** organization buckets | | |
+| | `--read-checks` | Grant permission to read checks | | |
+| | `--read-dashboards` | Grant permission to read dashboards | | |
+| | `--read-dbrps` | Grant permission to read database retention policy mappings | | |
+| | `--read-notificationEndpoints` | Grant permission to read notificationEndpoints | | |
+| | `--read-notificationRules` | Grant permission to read notificationRules | | |
+| | `--read-orgs` | Grant permission to read organizations | | |
+| | `--read-remotes` | Grant permission to read remote configurations | | |
+| | `--read-replications` | Grant permission to read replication configurations | | |
+| | `--read-tasks` | Grant permission to read tasks | | |
+| | `--read-telegrafs` | Grant permission to read Telegraf configurations | | |
+| | `--read-users` | Grant permission to read organization users | | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+| `-u` | `--user` | Username | string | |
+| | `--write-annotations` | Grant permission to create annotations | | |
+| | `--write-bucket` | Grant permission to write to a specified bucket ID | stringArray | |
+| | `--write-buckets` | Grant permission to create and update **all** organization buckets | | |
+| | `--write-checks` | Grant permission to create checks | | |
+| | `--write-dashboards` | Grant permission to create and update dashboards | | |
+| | `--write-dbrps` | Grant permission to create database retention policy mappings | | |
+| | `--write-notificationEndpoints` | Grant permission to create notificationEndpoints | | |
+| | `--write-notificationRules` | Grant permission to create notificationRules | | |
+| | `--write-orgs` | Grant permission to create and update organizations | | |
+| | `--write-remotes` | Grant permission to create and update remote configurations | | |
+| | `--write-replications` | Grant permission to create and update replication configurations | | |
+| | `--write-tasks` | Grant permission to create and update 
tasks | | |
+| | `--write-telegrafs` | Grant permission to create and update Telegraf configurations | | |
+| | `--write-users` | Grant permission to create and update organization users | | |
+| | `--write-variables` | Grant permission to create and update variables | | |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+- [Create an all-access API token](#create-an-all-access-api-token) {{% oss-only %}} or [Create an operator API token](#create-an-operator-api-token){{% /oss-only %}}
+- [Create an API token with specified read and write permissions](#create-an-api-token-with-specified-read-and-write-permissions)
+- [Create a token with read and write access to specific buckets](#create-an-api-token-with-read-and-write-access-to-specific-buckets)
+- [Create a read-only API token](#create-a-read-only-api-token)
+
+### Create an all-access API token
+
+Create an [all-access token](/influxdb/cloud/security/tokens/#all-access-token) to grant permissions to all resources in an organization.
+
+```sh
+influx auth create \
+  --all-access
+```
+
+{{% oss-only %}}
+
+### Create an operator API token
+
+Create an [operator token](/influxdb/v2.0/security/tokens/#operator-token) to grant permissions to all resources in all organizations.
+
+```sh
+influx auth create \
+  --operator
+```
+{{% /oss-only %}}
+
+### Create an API token with specified read and write permissions
+
+```sh
+influx auth create \
+  --read-buckets \
+  --read-checks \
+  --read-dashboards \
+  --read-dbrps \
+  --read-notificationEndpoints \
+  --read-notificationRules \
+  --read-orgs \
+  --read-remotes \
+  --read-replications \
+  --read-tasks \
+  --read-telegrafs \
+  --read-users \
+  --write-annotations \
+  --write-buckets \
+  --write-checks \
+  --write-dashboards \
+  --write-dbrps \
+  --write-notificationEndpoints \
+  --write-notificationRules \
+  --write-orgs \
+  --write-remotes \
+  --write-replications \
+  --write-tasks \
+  --write-telegrafs \
+  --write-users \
+  --write-variables
+```
+
+### Create an API token with read and write access to specific buckets
+
+```sh
+influx auth create \
+  --read-bucket 0000000000000001 \
+  --read-bucket 0000000000000002 \
+  --write-bucket 0000000000000001 \
+  --write-bucket 0000000000000002
+```
+
+### Create a read-only API token
+
+```sh
+influx auth create \
+  --read-buckets \
+  --read-checks \
+  --read-dashboards \
+  --read-dbrps \
+  --read-notificationEndpoints \
+  --read-notificationRules \
+  --read-orgs \
+  --read-remotes \
+  --read-replications \
+  --read-tasks \
+  --read-telegrafs \
+  --read-users
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/auth/delete.md b/content/influxdb/v2.5/reference/cli/influx/auth/delete.md
new file mode 100644
index 000000000..db427947d
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/auth/delete.md
@@ -0,0 +1,39 @@
+---
+title: influx auth delete
+description: The `influx auth delete` command deletes an API token from InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx auth delete
+    parent: influx auth
+weight: 201
+---
+
+The `influx auth delete` command deletes an API token from InfluxDB.
+ +## Usage +``` +influx auth delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | ({{< req >}}) API token ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete an API token +```sh +influx auth delete --id 06c86c40a9f36000 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/auth/inactive.md b/content/influxdb/v2.5/reference/cli/influx/auth/inactive.md new file mode 100644 index 000000000..777a0af38 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/auth/inactive.md @@ -0,0 +1,47 @@ +--- +title: influx auth inactive +description: The `influx auth inactive` command inactivates an API token in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx auth inactive + parent: influx auth +weight: 201 +--- + +The `influx auth inactive` command inactivates an API token in InfluxDB. +Inactive tokens **do not** authorize access to InfluxDB. + +To temporarily disable client access to InfluxDB, inactivate the authentication +token the client is using rather than delete the token. +If you delete the token, you have to generate a new token and update the client +with the new token. +By setting a token to inactive, you can [activate the token](/influxdb/v2.5/reference/cli/influx/auth/active/) +to grant the client access without having to modify the client. + +## Usage +``` +influx auth inactive [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `inactive` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-i` | `--id` | ({{< req >}}) API token ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Inactivate an API token +```sh +influx auth inactive --id 06c86c40a9f36000 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/auth/list.md b/content/influxdb/v2.5/reference/cli/influx/auth/list.md new file mode 100644 index 000000000..8a2922df1 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/auth/list.md @@ -0,0 +1,53 @@ +--- +title: influx auth list +description: The `influx auth list` command lists API tokens in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx auth list + parent: influx auth +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/auth/find +--- + +The `influx auth list` command lists and searches API tokens in InfluxDB. + +## Usage +``` +influx auth list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | API token ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-u` | `--user` | Username | string | | +| | `--user-id` | User ID | string | | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List all API tokens +```sh +influx auth list +``` + +##### List API tokens associated with a user +```sh +influx auth list --user username +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/backup/_index.md b/content/influxdb/v2.5/reference/cli/influx/backup/_index.md new file mode 100644 index 000000000..7102a76d5 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/backup/_index.md @@ -0,0 +1,66 @@ +--- +title: influx backup +description: The `influx backup` command backs up data stored in InfluxDB to a specified directory. 
+menu: + influxdb_2_5_ref: + name: influx backup + parent: influx +weight: 101 +influxdb/v2.5/tags: [backup] +related: + - /influxdb/v2.5/backup-restore/backup/ + - /influxdb/v2.5/reference/cli/influx/restore/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +updated_in: CLI v2.0.2 +--- + +The `influx backup` command backs up data stored in InfluxDB to a specified directory. + +## Usage +``` +influx backup [flags] path +``` + +## Flags + +| Flag | | Description | Input type | {{< cli/mapped >}} | +|------|-------------------|------------------------------------------------------------------------------------------------------------|------------|-----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--bucket-id` | ID of the bucket to back up from (mutually exclusive with `--bucket`) | string | | +| `-b` | `--bucket` | Name of the bucket to back up from (mutually exclusive with `--bucket-id`) | string | | +| | `--compression` | By default, `gzip` argument enables compression on downloaded files. Set to `none` to disable compression. | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `backup` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default: `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | string | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [Back up all data to a directory](#back-up-all-data-to-a-directory) +- [Back up all data to the current working directory](#back-up-all-data-to-the-current-working-directory) +- [Back up a specific bucket to a directory](#back-up-a-specific-bucket-to-a-directory) + +##### Back up all data to a directory +```sh +influx backup /path/to/backup/dir/ +``` + +##### Back up all data to the current working directory +```sh +influx backup ./ +``` + +##### Back up a specific bucket to a directory +```sh +influx backup --bucket example-bucket /path/to/backup/dir/ +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket-schema/_index.md b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/_index.md new file mode 100644 index 000000000..708696684 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/_index.md @@ -0,0 +1,47 @@ +--- +title: influx bucket-schema +description: The `influx bucket-schema` command and its subcommands manage schemas of buckets in InfluxDB. 
+menu:
+  influxdb_2_5_ref:
+    name: influx bucket-schema
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [buckets, bucket-schema]
+cascade:
+  related:
+    - /influxdb/cloud/organizations/buckets/bucket-schema
+    - /influxdb/cloud/organizations/buckets/
+    - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+    - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+  metadata: [influx CLI 2.1.0+, InfluxDB Cloud only]
+  prepend:
+    block: cloud
+    content: |
+      #### Works with InfluxDB Cloud bucket schemas
+      `influx bucket-schema` and its subcommands work with [InfluxDB Cloud bucket schemas](/influxdb/cloud/organizations/buckets/bucket-schema).
+      This feature is not available in InfluxDB OSS v2.5.
+---
+
+The `influx bucket-schema` command and its subcommands manage
+schemas for InfluxDB buckets.
+
+## Usage
+
+```
+influx bucket-schema [flags]
+influx bucket-schema [command]
+```
+
+## Subcommands
+
+| Subcommand | Description |
+| :----------------------------------------------------------------- | :--------------------- |
+| [create](/influxdb/v2.5/reference/cli/influx/bucket-schema/create) | Create a bucket schema |
+| [list](/influxdb/v2.5/reference/cli/influx/bucket-schema/list) | List bucket schemas |
+| [update](/influxdb/v2.5/reference/cli/influx/bucket-schema/update) | Update a bucket schema |
+
+## Flags
+
+| Flag | | Description |
+| :--- | :------- | :----------------------------------- |
+| `-h` | `--help` | Help for the `bucket-schema` command |
diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket-schema/create.md b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/create.md
new file mode 100644
index 000000000..5df84a684
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/create.md
@@ -0,0 +1,90 @@
+---
+title: influx bucket-schema create
+description: The `influx bucket-schema create` command sets the schema for a measurement in an InfluxDB bucket that has the `explicit` schema-type.
+menu:
+  influxdb_2_5_ref:
+    name: influx bucket-schema create
+    parent: influx bucket-schema
+weight: 201
+related:
+  - /influxdb/cloud/organizations/buckets/bucket-schema
+---
+
+The `influx bucket-schema create` command sets the schema for a measurement in
+an InfluxDB bucket that has the [`explicit` schema-type](/influxdb/cloud/reference/cli/influx/bucket/create/#create-a-bucket-with-an-explicit-schema).
+
+## Usage
+
+```sh
+influx bucket-schema create [flags]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:--------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| `-n` | `--bucket` | ({{< req >}}) Bucket name (mutually exclusive with `--bucket-id`) | string | |
+| `-i` | `--bucket-id` | ({{< req >}}) Bucket ID (mutually exclusive with `--bucket`) | string | |
+| | `--columns-file` | ({{< req >}}) Path to column definitions file. For more information, see [Create a columns file](/influxdb/cloud/reference/cli/influx/bucket-schema/create/#create-a-columns-file). | string | |
+| | `--columns-format` | Columns file format (`csv`, `ndjson`, `json`, default: `auto`). 
For more information, see [Create a schema with columns format](#create-a-schema-with-columns-format) | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-x` | `--extended-output` | Print column information for each measurement schema (default: false) | | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | ({{< req >}}) Measurement name | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + + +## Examples + +{{< cli/influx-creds-note >}} + +- [Create a schema using the influx CLI](#create-a-schema-using-the-influx-cli) +- [Create a schema and print column information](#create-a-schema-and-print-column-information) +- [Create a schema with columns format](#create-a-schema-with-columns-format) + +### Create a schema using the influx CLI + +```sh +influx bucket-schema create \ + --bucket example-bucket \ + --name temperature \ + --columns-file columns.csv +``` + +### Create a schema and print column information + +```sh +influx bucket-schema create \ + --bucket example-bucket \ + --name cpu \ + --columns-file columns.csv \ + --extended-output +``` + +### Create a schema with columns format + +By default, InfluxDB attempts to detect the **columns file** format. +If your file's extension doesn't match the format, set the format with the [`columns-format` flag](/influxdb/cloud/reference/cli/influx/bucket-schema/create). + +```sh +influx bucket-schema create \ + --bucket example-bucket \ + --name cpu \ + --columns-file columns.json \ + --columns-format ndjson +``` + +```sh +influx bucket-schema create \ + --bucket example-bucket \ + --name cpu \ + --columns-file columns.txt \ + --columns-format csv +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket-schema/list.md b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/list.md new file mode 100644 index 000000000..3fa82992e --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/list.md @@ -0,0 +1,59 @@ +--- +title: influx bucket-schema list +description: The `influx bucket-schema list` command lists the schemas of an InfluxDB bucket that has the `explicit` schema-type. +menu: + influxdb_2_5_ref: + name: influx bucket-schema list + parent: influx bucket-schema +weight: 201 +related: + - /influxdb/cloud/organizations/buckets/bucket-schema +--- + +The `influx bucket-schema list` command lists the schemas of an +InfluxDB bucket that has the [`explicit` schema-type](/influxdb/cloud/reference/cli/influx/bucket/create/#create-a-bucket-with-an-explicit-schema). 
+
+## Usage
+
+```sh
+influx bucket-schema list [flags]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:--------------------|:----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| `-n` | `--bucket` | Bucket name (mutually exclusive with `--bucket-id`) | string | |
+| `-i` | `--bucket-id` | Bucket ID (mutually exclusive with `--bucket`) | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-x` | `--extended-output` | Print column information for each measurement schema (default: false) | | |
+| `-h` | `--help` | Help for the `list` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-n` | `--name` | Measurement name | string | |
+| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
+| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+### List all schemas of a bucket and print column information
+
+```sh
+influx bucket-schema list \
+  --bucket example-bucket \
+  --extended-output
+```
+
+### Print column details for a single measurement
+```sh
+influx bucket-schema list \
+  --bucket example-bucket \
+  --name cpu \
+  --extended-output
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket-schema/update.md b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/update.md
new file mode 100644
index 000000000..f0fc840dd
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/bucket-schema/update.md
@@ -0,0 +1,96 @@
+---
+title: influx bucket-schema update
+description: The `influx bucket-schema update` command updates the schema of an InfluxDB bucket that has the `explicit` schema-type.
+menu:
+  influxdb_2_5_ref:
+    name: influx bucket-schema update
+    parent: influx bucket-schema
+weight: 201
+related:
+  - /influxdb/cloud/organizations/buckets/bucket-schema
+---
+
+The `influx bucket-schema update` command updates the schema of an InfluxDB bucket that has the [`explicit` schema-type](/influxdb/cloud/reference/cli/influx/bucket/create/#create-a-bucket-with-an-explicit-schema).
+
+`bucket-schema update` requires a bucket with at least one defined schema.
+
+## Usage
+
+```sh
+influx bucket-schema update [flags]
+```
+
+##### Supported operations
+- Adding new columns to a schema
+
+##### Unsupported operations
+- Modifying existing columns in a schema
+- Deleting columns from a schema
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:--------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| `-n` | `--bucket` | ({{< req >}}) Bucket name (mutually exclusive with `--bucket-id`) | string | |
+| `-i` | `--bucket-id` | ({{< req >}}) Bucket ID (mutually exclusive with `--bucket`) | string | |
+| | `--columns-file` | ({{< req >}}) Path to column definitions file. For more information, see [Create a columns file](/influxdb/cloud/reference/cli/influx/bucket-schema/create/#create-a-columns-file). | string | |
+| | `--columns-format` | Columns file format (`csv`, `ndjson`, `json`, default: `auto`). For more information, see [Update a schema with columns format](#update-a-schema-with-columns-format) | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-x` | `--extended-output` | Print column information for each measurement schema (default: false) | | |
+| `-h` | `--help` | Help for the `update` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-n` | `--name` | ({{< req >}}) Measurement name | string | |
+| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
+| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+- [Update a schema using the influx CLI](#update-a-schema-using-the-influx-cli)
+- [Update a schema and print column information](#update-a-schema-and-print-column-information)
+- [Update a schema with columns format](#update-a-schema-with-columns-format)
+
+### Update a schema using the influx CLI
+
+```sh
+influx bucket-schema update \
+  --bucket example-bucket \
+  --name temperature \
+  --columns-file columns.csv
+```
+
+### Update a schema and print column information
+```sh
+influx bucket-schema update \
+  --bucket example-bucket \
+  --name cpu \
+  --columns-file columns.csv \
+  --extended-output
+```
+
+### Update a schema with columns format
+
+By default, InfluxDB attempts to detect the **columns file** format.
+If your file's extension doesn't match the format, set the format with the `columns-format` flag.
+
+```sh
+influx bucket-schema update \
+  --bucket example-bucket \
+  --name cpu \
+  --columns-file columns.json \
+  --columns-format ndjson
+```
+
+```sh
+influx bucket-schema update \
+  --bucket example-bucket \
+  --name cpu \
+  --columns-file columns.txt \
+  --columns-format csv
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket/_index.md b/content/influxdb/v2.5/reference/cli/influx/bucket/_index.md
new file mode 100644
index 000000000..e5ae05d60
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/bucket/_index.md
@@ -0,0 +1,43 @@
+---
+title: influx bucket
+description: The `influx bucket` command and its subcommands manage buckets in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx bucket
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [buckets]
+cascade:
+  related:
+    - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+    - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+  metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+---
+
+The `influx bucket` command and its subcommands manage buckets in InfluxDB.
+
+## Usage
+```
+influx bucket [flags]
+influx bucket [command]
+```
+
+## Subcommands
+| Subcommand | Description |
+|:---------- |:----------- |
+| [create](/influxdb/v2.5/reference/cli/influx/bucket/create) | Create bucket |
+| [delete](/influxdb/v2.5/reference/cli/influx/bucket/delete) | Delete bucket |
+| [list](/influxdb/v2.5/reference/cli/influx/bucket/list) | List buckets |
+| [update](/influxdb/v2.5/reference/cli/influx/bucket/update) | Update bucket |
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:----------------------------------------------------------------------|:-----------|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `bucket` command | | |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. | string | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket/create.md b/content/influxdb/v2.5/reference/cli/influx/bucket/create.md
new file mode 100644
index 000000000..ba02dc867
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/bucket/create.md
@@ -0,0 +1,116 @@
+---
+title: influx bucket create
+description: The `influx bucket create` command creates a bucket in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx bucket create
+    parent: influx bucket
+weight: 201
+aliases:
+  - /influxdb/v2.5/reference/cli/influx/bucket/create/
+related:
+  - /influxdb/v2.5/organizations/buckets/create-bucket/
+  - /influxdb/v2.5/reference/internals/shards/
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+updated_in: CLI v2.3.0
+---
+
+The `influx bucket create` command creates a bucket in InfluxDB.
+ +## Usage + +```sh +influx bucket create [flags] +``` + +## Flags + +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :----------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :-------------------- | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | Bucket description | string | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Bucket name | string | `INFLUX_BUCKET_NAME` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-r` | `--retention` | Duration bucket retains data (0 is infinite, default is 0) | duration | | +| | `--schema-type` | Bucket schema type (`explicit`, default `implicit`) _(Cloud only)_. For more information, see [Manage bucket schema](/influxdb/cloud/organizations/buckets/bucket-schema/). | string | | +| | `--shard-group-duration` | Bucket shard group duration (OSS only) | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +{{% note %}} +#### Retention periods +The minimum retention period is **one hour**. Valid `--retention` units: + +- nanoseconds (`ns`) +- microseconds (`us` or `µs`) +- milliseconds (`ms`) +- seconds (`s`) +- minutes (`m`) +- hours (`h`) +- days (`d`) +- weeks (`w`) +{{% /note %}} + +## Examples + +{{< cli/influx-creds-note >}} + +- [Create a bucket with infinite data retention](#create-a-bucket-with-infinite-data-retention) +- [Create a bucket that retains data for 30 days](#create-a-bucket-that-retains-data-for-30-days) +- [Create a bucket with a description](#create-a-bucket-with-a-description) +- [Create a bucket with a custom shard group duration](#create-a-bucket-with-a-custom-shard-group-duration) +- [Create a bucket with an explicit schema](#create-a-bucket-with-an-explicit-schema) + +##### Create a bucket with infinite data retention + +```sh +influx bucket create --name example-bucket +``` + +##### Create a bucket that retains data for 30 days + +```sh +influx bucket create \ + --name example-bucket \ + --retention 30d +``` + +##### Create a bucket with a description + +```sh +influx bucket create \ + --name example-bucket \ + --description "Example bucket description" +``` + +##### Create a bucket with a custom shard group duration + +Custom shard group durations are only supported in **InfluxDB OSS**. +The shard group duration must be shorter than the bucket's retention period. For more information, see [InfluxDB shards and shard groups](/influxdb/v2.5/reference/internals/shards/). 
+ +```sh +influx bucket create \ + --name example-bucket \ + --retention 30d \ + --shard-group-duration 2d +``` + +##### Create a bucket with an explicit schema + +{{% cloud %}} +[Explicit bucket schemas](/influxdb/cloud/reference/cli/influx/bucket-schema) are only +supported in **InfluxDB Cloud**. +For more information, see [Manage bucket schema](/influxdb/cloud/organizations/buckets/bucket-schema/). +{{% /cloud %}} + +```sh +{{< get-shared-text "bucket-schema/bucket-schema-type.sh" >}} +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket/delete.md b/content/influxdb/v2.5/reference/cli/influx/bucket/delete.md new file mode 100644 index 000000000..6a6f7b99d --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/bucket/delete.md @@ -0,0 +1,53 @@ +--- +title: influx bucket delete +description: The `influx bucket delete` command deletes a bucket from InfluxDB and all the data it contains. +menu: + influxdb_2_5_ref: + name: influx bucket delete + parent: influx bucket +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/bucket/delete/ +related: + - /influxdb/v2.5/organizations/buckets/delete-bucket/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +--- + +The `influx bucket delete` command deletes a bucket from InfluxDB and all the data it contains. + +## Usage +```sh +influx bucket delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | Bucket ID _(required if no `--name`)_ | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Bucket name _(requires `--org` or `org-id`)_ | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete a bucket by name +```sh +influx bucket delete --name example-bucket +``` + +##### Delete a bucket by ID +```sh +influx bucket delete --id 06c86c40a9f36000 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket/list.md b/content/influxdb/v2.5/reference/cli/influx/bucket/list.md new file mode 100644 index 000000000..9bc79ddc7 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/bucket/list.md @@ -0,0 +1,66 @@ +--- +title: influx bucket list +description: The `influx bucket list` command lists and searches for buckets in InfluxDB. 
+menu: + influxdb_2_5_ref: + name: influx bucket list + parent: influx bucket +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/bucket/find + - /influxdb/v2.5/reference/cli/influx/bucket/list/ +updated_in: CLI v2.3.0 +--- + +The `influx bucket list` command lists and searches for buckets in InfluxDB. + +## Usage +``` +influx bucket list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :--------------------------------------------------------------------------- | :--------: | :-------------------- | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | Bucket ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--limit` | Total number of buckets to fetch from the server, or 0 to return all buckets | string | | +| `-n` | `--name` | Bucket name | string | `INFLUX_BUCKET_NAME` | +| | `--offset` | Number of buckets to skip over in the list | integer | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--page-size` | Number of buckets to fetch per request to the server (default 20) | | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all buckets](#list-all-buckets) +- [List a bucket by name](#list-a-bucket-by-name) +- [List a bucket by ID](#list-a-bucket-by-id) + +##### List all buckets +```sh +influx bucket list +``` + +##### List a bucket by name +```sh +influx bucket list --name example-bucket +``` + +##### List a bucket by ID +```sh +influx bucket list --id 06c86c40a9f36000 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/bucket/update.md b/content/influxdb/v2.5/reference/cli/influx/bucket/update.md new file mode 100644 index 000000000..b029b71b9 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/bucket/update.md @@ -0,0 +1,87 @@ +--- +title: influx bucket update +description: The `influx bucket update` command updates information associated with buckets in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx bucket update + parent: influx bucket +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/bucket/update/ +related: + - /influxdb/v2.5/organizations/buckets/update-bucket/ + - /influxdb/v2.5/reference/internals/shards/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +--- + +The `influx bucket update` command updates information associated with buckets in InfluxDB. 
+ +## Usage +``` +influx bucket update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:-------------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | Bucket description | string | | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | ({{< req >}}) Bucket ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | New bucket name | string | `INFLUX_BUCKET_NAME` | +| `-r` | `--retention` | New duration bucket will retain data. For detail, see [Retention periods](/influxdb/v2.5/reference/cli/influx/bucket/update/#retention-periods). | duration | | +| | `--shard-group-duration` | Custom shard group duration for the bucket (OSS only) | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +{{% note %}} +#### Retention periods +The minimum retention period is **one hour**. Valid `--retention` units: + +- nanoseconds (`ns`) +- microseconds (`us` or `µs`) +- milliseconds (`ms`) +- seconds (`s`) +- minutes (`m`) +- hours (`h`) +- days (`d`) +- weeks (`w`) + +For an **infinite** retention period, set the number to 0. For example, `0s`. +{{% /note %}} + + + +## Examples + +{{< cli/influx-creds-note >}} + +##### Update the name of a bucket +```sh +influx bucket update \ + --id 06c86c40a9f36000 \ + --name new-bucket-name +``` + +##### Update the retention period of a bucket +```sh +influx bucket update \ + --id 06c86c40a9f36000 \ + --retention 90d +``` + +##### Update the shard group duration of a bucket +Custom shard group durations are only supported in **InfluxDB OSS**. +The shard group duration must be shorter than the buckets retention period. + +```sh +influx bucket update \ + --id 06c86c40a9f36000 \ + --shard-group-duration 2d +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/completion/_index.md b/content/influxdb/v2.5/reference/cli/influx/completion/_index.md new file mode 100644 index 000000000..d8d2b795a --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/completion/_index.md @@ -0,0 +1,61 @@ +--- +title: influx completion +description: > + The `influx completion` command outputs `influx` shell completion scripts for a + specified shell (`bash` or `zsh`). +menu: + influxdb_2_5_ref: + name: influx completion + parent: influx +weight: 101 +influxdb/v2.5/tags: [cli, tools] +related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx completion` command outputs `influx` shell completion scripts for a +specified shell (`bash` or `zsh`). 
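+
+For example, to inspect the generated script or install it manually, you can write it
+to a file (a minimal sketch; the output path is only an example):
+
+```sh
+# Write the bash completion script to a file, then source it in the current shell
+influx completion bash > ~/.influx-completion.bash
+source ~/.influx-completion.bash
+```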
+ +## Usage +``` +influx completion [bash|zsh] [flags] +``` + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `completion` command | + +## Install completion scripts + +Add the appropriate installation command below to your `.bashrc` or `.zshrc`. + +#### Completion snippets in .bashrc or .zshrc +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[bash](#) +[zsh](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +# macOS +$ source $(brew --prefix)/etc/bash_completion.d +$ source <(influx completion bash) + +# Ubuntu +$ source /etc/bash_completion.d +$ source <(influx completion bash) +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +# macOS +$ source <(influx completion zsh) + +# Ubuntu +$ source <(influx completion zsh) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} diff --git a/content/influxdb/v2.5/reference/cli/influx/config/_index.md b/content/influxdb/v2.5/reference/cli/influx/config/_index.md new file mode 100644 index 000000000..686c337a1 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/config/_index.md @@ -0,0 +1,65 @@ +--- +title: influx config +description: The `influx config` command and subcommands manage multiple InfluxDB connection configurations. +menu: + influxdb_2_5_ref: + name: influx config + parent: influx +weight: 101 +influxdb/v2.5/tags: [config] +cascade: + related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx config` command displays the active InfluxDB connection configuration +and manages multiple connection configurations stored, by default, in `~/.influxdbv2/configs`. +Each connection includes a URL, token, associated organization, and active setting. +InfluxDB reads the token from the active connection configuration, so you don't +have to manually enter a token to log into InfluxDB. 
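+
+For example, once a configuration is active, later `influx` commands pick up the URL,
+API token, and organization from it automatically (a sketch; `example-config` is a
+placeholder configuration name):
+
+```sh
+# Switch to a stored configuration, then run a command without --host, --token, or --org
+influx config example-config
+influx bucket list
+```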
+
+## Usage
+```
+influx config [flags]
+influx config [command]
+influx config <config-name>
+```
+
+##### Quickly switch between configurations
+```sh
+# Syntax
+influx config <config-name>
+
+# Example
+influx config local-config
+```
+
+To quickly switch back to the previous configuration, use the following command:
+
+```sh
+influx config -
+```
+
+## Examples
+```sh
+# Show the active connection configuration
+influx config
+
+# Set a connection configuration as active
+influx config local-config
+```
+
+## Subcommands
+| Subcommand | Description |
+|:---- |:----------- |
+| [create](/influxdb/v2.5/reference/cli/influx/config/create/) | Create a connection configuration |
+| [list](/influxdb/v2.5/reference/cli/influx/config/list/) | List connection configurations |
+| [delete](/influxdb/v2.5/reference/cli/influx/config/rm/) | Delete a connection configuration |
+| [set](/influxdb/v2.5/reference/cli/influx/config/set/) | Set or update a connection configuration |
+
+## Flags
+| Flag | | Description |
+|:---- |:--- |:----------- |
+| `-h` | `--help` | Help for the `config` command |
diff --git a/content/influxdb/v2.5/reference/cli/influx/config/create.md b/content/influxdb/v2.5/reference/cli/influx/config/create.md
new file mode 100644
index 000000000..9cde4343c
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/config/create.md
@@ -0,0 +1,97 @@
+---
+title: influx config create
+description: The `influx config create` command creates an InfluxDB connection configuration.
+menu:
+  influxdb_2_5_ref:
+    name: influx config create
+    parent: influx config
+weight: 201
+updated_in: CLI 2.5.0
+---
+
+The `influx config create` command creates an InfluxDB connection configuration
+and stores it in a local file:
+
+| OS/Platform | CLI config file path |
+| :--------------------------- | :---------------------------------- |
+| macOS | `~/.influxdbv2/configs` |
+| Linux (installed as binary) | `~/.influxdbv2/configs` |
+| Linux (installed as service) | `/var/lib/influxdb/configs` |
+| Windows | `%USERPROFILE%\.influxdbv2\configs` |
+| Docker (DockerHub) | `/etc/influxdb2/configs` |
+| Docker (Quay.io) | `/root/.influxdbv2/configs` |
+| Kubernetes | `/etc/influxdb2/configs` |
+
+To view CLI connection configurations after creating them, use [influx config list](/influxdb/v2.5/reference/cli/influx/config/list/).
+
+{{% note %}}
+**Note:** If you create multiple connection configurations (for example, separate admin and user configurations), use [`influx config <config-name>`](/influxdb/v2.5/reference/cli/influx/config/) to switch to the configuration you want to use.
+{{% /note %}}
+
+## Usage
+```
+influx config create [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :-------------------- | :------------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-a` | `--active` | Set the specified connection to be the active configuration. | | |
+| `-n` | `--config-name` | ({{< req >}}) Name of the new configuration. | string | |
+| `-h` | `--help` | Help for the `create` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-u` | `--host-url` | ({{< req >}}) Connection URL for the new configuration. 
| string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-o` | `--org` | Organization name | string | |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+| `-p` | `--username-password` | **(OSS only)** Username (and optionally password) to use for authentication. Include `username:password` to ensure a session is automatically authenticated. Include `username` (without password) to prompt for a password before creating the session. | string | |
+
+## Examples
+
+- [Create a connection configuration and set it active](#create-a-connection-configuration-and-set-it-active)
+- [Create a connection configuration without setting it active](#create-a-connection-configuration-without-setting-it-active)
+- {{% oss-only %}}[Create a connection configuration that uses a username and password](#create-a-connection-configuration-that-uses-a-username-and-password){{% /oss-only %}}
+
+#### Create a connection configuration and set it active
+```sh
+influx config create --active \
+  -n config-name \
+  -u http://localhost:8086 \
+  -t mySuP3rS3cr3tT0keN \
+  -o example-org
+```
+
+#### Create a connection configuration without setting it active
+```sh
+influx config create \
+  -n config-name \
+  -u http://localhost:8086 \
+  -t mySuP3rS3cr3tT0keN \
+  -o example-org
+```
+
+{{% oss-only %}}
+
+#### Create a connection configuration that uses a username and password
+
+The **`influx` CLI 2.4.0+** lets you create connection configurations
+that authenticate with **InfluxDB OSS 2.4+** using the username and
+password combination that you would use to log into the InfluxDB user interface (UI).
+The CLI retrieves a session cookie and stores it, unencrypted, in your
+[configs path](/influxdb/v2.5/reference/internals/file-system-layout/#configs-path).
+
+Use the `--username-password`, `-p` option to provide your username and password
+using the `<username>:<password>` syntax.
+If no password is provided, the CLI will prompt for a password after each
+command that requires authentication.
+
+```sh
+influx config create \
+  -n config-name \
+  -u http://localhost:8086 \
+  -p example-user:example-password \
+  -o example-org
+```
+
+{{% /oss-only %}}
diff --git a/content/influxdb/v2.5/reference/cli/influx/config/list.md b/content/influxdb/v2.5/reference/cli/influx/config/list.md
new file mode 100644
index 000000000..f4f82a2b8
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/config/list.md
@@ -0,0 +1,29 @@
+---
+title: influx config list
+description: The `influx config list` command lists all InfluxDB connection configurations.
+menu:
+  influxdb_2_5_ref:
+    name: influx config list
+    parent: influx config
+weight: 201
+---
+
+The `influx config list` command lists all InfluxDB connection configurations in
+the `configs` file (by default, stored at `~/.influxdbv2/configs`).
+Each connection configuration includes a URL, API token, and active setting.
+An asterisk (`*`) indicates the active configuration. 
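+
+A minimal sketch: list the stored configurations as a table, or as JSON for scripting
+using the `--json` flag documented below:
+
+```sh
+# Human-readable table (the active configuration is marked with an asterisk)
+influx config list
+
+# JSON output for scripting
+influx config list --json
+```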
+ +## Usage +``` +influx config list [flags] +``` + +#### Command aliases +`list`, `ls` + +## Flags +| Flag | | Description | {{< cli/mapped >}} | +| :--- | :--------------- | :------------------------------------------- | :-------------------- | +| `-h` | `--help` | Help for the `list` command | | +| | `--hide-headers` | Hide table headers (default `false`) | `INFLUX_HIDE_HEADERS` | +| | `--json` | Output data as JSON (default `false`) | `INFLUX_OUTPUT_JSON` | diff --git a/content/influxdb/v2.5/reference/cli/influx/config/rm.md b/content/influxdb/v2.5/reference/cli/influx/config/rm.md new file mode 100644 index 000000000..4bb507630 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/config/rm.md @@ -0,0 +1,42 @@ +--- +title: influx config rm +description: The `influx config rm` command removes an InfluxDB connection configuration. +menu: + influxdb_2_5_ref: + name: influx config rm + parent: influx config +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/config/delete/ +--- + +The `influx config rm` command removes an InfluxDB connection configuration +from the `configs` file (by default, stored at `~/.influxdbv2/configs`). + +## Usage +``` +influx config rm [flags] +``` + +#### Command aliases +`rm`, `remove`, `delete` + + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :--------------- | :------------------------------------------- | :--------: | :-------------------- | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | + +## Examples + +##### Delete a connection configuration +```sh +influx config rm local-config +``` + +##### Delete multiple connection configurations +```sh +influx config rm config-1 config-2 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/config/set.md b/content/influxdb/v2.5/reference/cli/influx/config/set.md new file mode 100644 index 000000000..5ffcfacb0 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/config/set.md @@ -0,0 +1,53 @@ +--- +title: influx config set +description: The `influx config set` command updates an InfluxDB connection configuration. +menu: + influxdb_2_5_ref: + name: influx config set + parent: influx config +weight: 201 +updated_in: CLI 2.5.0 +--- + +The `influx config set` command updates information in an InfluxDB connection +configuration in the `configs` file (by default, stored at `~/.influxdbv2/configs`). 
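+
+For example, you might point an existing configuration at a different InfluxDB host
+without re-entering the token (a sketch; the configuration name and URL are placeholders):
+
+```sh
+influx config set \
+  -n config-name \
+  -u http://localhost:8086
+```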
+
+## Usage
+```
+influx config set [flags]
+```
+
+#### Command aliases
+`set`, `update`
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :--------------- | :-------------------------------------------------------------- | :--------: | :-------------------- |
+| `-a` | `--active` | Set the specified connection to active | | |
+| `-n` | `--config-name` | Name for the InfluxDB connection configuration to set or update | string | |
+| `-h` | `--help` | Help for the `set` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-u` | `--host-url` | URL for InfluxDB connection configuration to set or update | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-o` | `--org` | Organization name for the connection configuration | string | |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+| `-p` | `--username-password` | **(OSS only)** Username (and optionally password) to use for authentication. Include `username:password` to ensure a session is automatically authenticated. Include `username` (without password) to prompt for a password before creating the session. | string | |
+
+## Examples
+
+##### Update a connection configuration and set it to active
+```sh
+influx config set --active \
+  -n config-name \
+  -t mySuP3rS3cr3tT0keN \
+  -o example-org
+```
+
+##### Update a connection configuration and do not set it to active
+```sh
+influx config set \
+  -n config-name \
+  -t mySuP3rS3cr3tT0keN \
+  -o example-org
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/dashboards/_index.md b/content/influxdb/v2.5/reference/cli/influx/dashboards/_index.md
new file mode 100644
index 000000000..2d4aecca4
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/dashboards/_index.md
@@ -0,0 +1,54 @@
+---
+title: influx dashboards
+description: >
+  The `influx dashboards` command lists existing InfluxDB dashboards.
+menu:
+  influxdb_2_5_ref:
+    name: influx dashboards
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [telegraf]
+related:
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+---
+
+The `influx dashboards` command lists existing InfluxDB dashboards.
+
+## Usage
+```sh
+influx dashboards [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:----------------------------------------------------------------------|:-----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `dashboards` command | | |
+| | `--hide-headers` | Hide table headers | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-i` | `--id` | Dashboard ID to retrieve | stringArray | | +| | `--json` | Output data as JSON | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List all dashboards +```sh +influx dashboards +``` + +##### List only specific dashboards +```sh +influx dashboards \ + --id 068ad4a493f2d000 \ + --id 0623f2dabc000121 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/delete/_index.md b/content/influxdb/v2.5/reference/cli/influx/delete/_index.md new file mode 100644 index 000000000..fd634a337 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/delete/_index.md @@ -0,0 +1,89 @@ +--- +title: influx delete +description: The `influx delete` command deletes points from an InfluxDB bucket. +menu: + influxdb_2_5_ref: + name: influx delete + parent: influx +weight: 101 +influxdb/v2.5/tags: [delete] +related: + - /influxdb/v2.5/write-data/delete-data + - /influxdb/v2.5/reference/syntax/delete-predicate + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.3+, InfluxDB 2.0.3+] +updated_in: CLI v2.3.0 +--- + +The `influx delete` command deletes [points](/influxdb/v2.5/reference/glossary/#point) +from an InfluxDB bucket in a specified time range. +Select points to delete within the specified time range using [delete predicate syntax](/influxdb/v2.5/reference/syntax/delete-predicate). + +{{% warn %}} +#### Deleting data without a delete predicate + +Running `influx delete` without the `-p` or `--predicate` flag deletes all data with timestamps between the specified +`--start` and `--stop` times in the specified bucket. + +{{% oss-only %}} + +#### Cannot delete data by field + +InfluxDB {{< current-version >}} does not support deleting data **by field**. + +{{% /oss-only %}} +{{% /warn %}} + + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| `-b` | `--bucket` | Name of bucket to remove data from (mutually exclusive with `--bucket-id`) | string | `INFLUX_BUCKET_NAME` | +| | `--bucket-id` | Bucket ID (mutually exclusive with `--bucket`) | string | `INFLUX_BUCKET_ID` | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-p` | `--predicate` | InfluxQL-like predicate string (see [Delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate)) | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--start` | ({{< req >}}) Start time in RFC3339 format (i.e. `2009-01-02T23:00:00Z`) | string | | +| | `--stop` | ({{< req >}}) Stop time in RFC3339 format (i.e. `2009-01-02T23:00:00Z`) | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [Delete all points in a measurement](#delete-all-points-in-a-measurement) +- [Delete points in a measurement with a specific tag value](#delete-points-in-a-measurement-with-a-specific-tag-value) +- [Delete all points within a specified time frame](#delete-all-points-within-a-specified-time-frame) + +##### Delete all points in a measurement +```sh +influx delete \ + --bucket example-bucket \ + --start 1970-01-01T00:00:00Z \ + --stop $(date +"%Y-%m-%dT%H:%M:%SZ") \ + --predicate '_measurement="example-measurement"' +``` + +##### Delete points in a measurement with a specific tag value +```sh +influx delete \ + --bucket example-bucket \ + --start 1970-01-01T00:00:00Z \ + --stop $(date +"%Y-%m-%dT%H:%M:%SZ") \ + --predicate '_measurement="example-measurement" AND host="old-host"' +``` + +##### Delete all points within a specified time frame +```sh +influx delete \ + --bucket example-bucket \ + --start 2020-03-01T00:00:00Z \ + --stop 2020-11-14T00:00:00Z +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/export/_index.md b/content/influxdb/v2.5/reference/cli/influx/export/_index.md new file mode 100644 index 000000000..b3600bd8e --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/export/_index.md @@ -0,0 +1,111 @@ +--- +title: influx export +description: The `influx export` command exports existing resources as an InfluxDB template. +menu: + influxdb_2_5_ref: + parent: influx +weight: 101 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/export/ +related: + - /influxdb/v2.5/influxdb-templates/create/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +cascade: + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx export` command exports existing resources as an InfluxDB template. 
+_For detailed examples of exporting InfluxDB templates, see
+[Create an InfluxDB template](/influxdb/v2.5/influxdb-templates/create/)._
+
+## Usage
+
+```
+influx export [flags]
+influx export [command]
+```
+
+## Available subcommands
+
+| Subcommand | Description |
+|:---------- |:----------- |
+| [all](/influxdb/v2.5/reference/cli/influx/export/all/) | Export all resources in an organization as a template |
+| [stack](/influxdb/v2.5/reference/cli/influx/export/stack/) | Export all resources associated with a stack as a template |
+
+## Flags
+
+| Flag | | Description | Input Type | {{< cli/mapped >}} |
+|:-----|:--------------------------|:---------------------------------------------------------------------------------|:-----------|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--bucket-names` | Comma-separated list of bucket names | string | |
+| | `--buckets` | Comma-separated list of bucket IDs | string | |
+| | `--check-names` | Comma-separated list of check names | string | |
+| | `--checks` | Comma-separated list of check IDs | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| | `--dashboard-names` | Comma-separated list of dashboard names | string | |
+| | `--dashboards` | Comma-separated list of dashboard IDs | string | |
+| | `--endpoint-names` | Comma-separated list of notification endpoint names | string | |
+| | `--endpoints` | Comma-separated list of notification endpoint IDs | string | |
+| `-f` | `--file` | Template output file. Defaults to stdout. Use `.yml` or `.json` file extensions. | string | |
+| `-h` | `--help` | Help for the `export` command | | |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| | `--label-names` | Comma-separated list of label names | string | | +| | `--labels` | Comma-separated list of label IDs | string | | +| | `--resource-type` | Resource type associated with all IDs via stdin | string | | +| | `--rule-names` | Comma-separated list of notification rule names | string | | +| | `--rules` | Comma-separated list of notification rule IDs | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--stack-id` | Stack ID to include resources from in export | string | | +| | `--task-names` | Comma-separated list of task names | string | | +| | `--tasks` | Comma-separated list of task IDs | string | | +| | `--telegraf-config-names` | Comma-separated list of Telegraf configuration names | string | | +| | `--telegraf-configs` | Comma-separated list of Telegraf configuration IDs | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| | `--variable-names` | Comma-separated list of variable names | string | | +| | `--variables` | Comma-separated list of variable IDs | string | | + +## Examples + +{{< cli/influx-creds-note >}} + +- [Export buckets by ID](#export-buckets-by-id) +- [Export buckets, labels, and dashboards by ID](#export-buckets-labels-and-dashboards-by-id) +- [Export buckets, labels, and dashboards by name](#export-buckets-labels-and-dashboards-by-name) +- [Export all resources associated with a stack](#export-all-resources-associated-with-a-stack) +- [Export resources both associated and not associated with a stack](#export-resources-both-associated-and-not-associated-with-a-stack) + +##### Export buckets by ID +```sh +influx export --buckets 0Xx0oox00XXoxxoo1,0Xx0oox00XXoxxoo2 +``` + +##### Export buckets, labels, and dashboards by ID +```sh +influx export \ + --buckets 0Xx0oox00XXoxxoo1,0Xx0oox00XXoxxoo2 \ + --labels o0x0oox0Xxoxx001,o0x0oox0Xxoxx002 \ + --dashboards 0XxXooXoo0xooXo0X1,0XxXooXoo0xooXo0X2 +``` + +##### Export buckets, labels, and dashboards by name +```sh +influx export \ + --bucket-names bucket1,bucket2,bucket3 \ + --label-names label1,label2,label3 \ + --dashboard-names dashboard1,dashboard2,dashboard3 +``` + +##### Export all resources associated with a stack +```sh +influx export --stack-id 0Xx0oox00XXoxxoo1 +``` + +##### Export resources both associated and not associated with a stack +```sh +influx export \ + --stack-id 0Xx0oox00XXoxxoo1 \ + --buckets o0x0oox0Xxoxx001,0XxXooXoo0xooXo0X2 \ + --dashboard-names bucket1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/export/all.md b/content/influxdb/v2.5/reference/cli/influx/export/all.md new file mode 100644 index 000000000..65ffcce11 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/export/all.md @@ -0,0 +1,107 @@ +--- +title: influx export all +description: > + The `influx export all` command exports all resources in an organization as an InfluxDB template. +menu: + influxdb_2_5_ref: + parent: influx export +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/export/all/ +related: + - /influxdb/v2.5/influxdb-templates/create/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +--- + +The `influx export all` command exports all resources in an +organization as an InfluxDB template. 
+_For detailed examples of exporting InfluxDB templates, see +[Create an InfluxDB template](/influxdb/v2.5/influxdb-templates/create/)._ + +{{% note %}} +To export resources as a template, you must use the **Operator token** created for +the initial InfluxDB user or an **All-Access token**. +For information about creating an All-Access API token, see [Create an API token](/influxdb/v2.5/security/tokens/create-token/). +{{% /note %}} + +## Usage +``` +influx export all [flags] +``` + +## Flags +| Flag | | Description | Input Type | {{< cli/mapped >}} | +|:-----|:------------------|:------------------------------------------------------------------------------------------------|:------------|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-f` | `--file` | Template output file. Defaults to stdout. Use `.yml` or `.json` file extensions. | string | | +| | `--filter` | Specify resources to export by labelName or resourceKind (format: `--filter=labelName=example`) | stringArray | | +| `-h` | `--help` | Help for the `export all` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-o` | `--org` | Organization name that owns the resources (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID that owns the resources (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Resources +The following resources can be exported: + +- Bucket +- Check +- CheckDeadman +- CheckThreshold +- Dashboard +- Label +- NotificationEndpoint +- NotificationEndpointHTTP +- NotificationEndpointPagerDuty +- NotificationEndpointSlack +- NotificationRule +- Task +- Telegraf +- Variable + +For additional information on each resource, please see +[template resources](/influxdb/v2.5/influxdb-templates/#template-resources). 
+ +## Examples + +{{< cli/influx-creds-note >}} + +- [Export all resources in an organization as a template](#export-all-resources-in-an-organization-as-a-template) +- [Export all bucket resources as a template](#export-all-bucket-resources-as-a-template) +- [Export all resources associated with label Foo](#export-all-resources-associated-with-label-foo) +- [Export all bucket resources and with label Foo](#export-all-bucket-resources-with-label-foo) +- [Export all bucket or dashboard resources with label Foo](#export-all-bucket-or-dashboard-resources-with-label-foo) + +##### Export all resources in an organization as a template +```sh +influx export all +``` + +##### Export all bucket resources as a template +```sh +influx export all --filter=resourceKind=Bucket +``` + +##### Export all resources associated with label Foo +```sh +influx export all --filter=labelName=Foo +``` + +##### Export all bucket resources and with label Foo +```sh +influx export all \ + --filter=resourceKind=Bucket \ + --filter=labelName=Foo +``` + +##### Export all bucket or dashboard resources with label Foo +```sh +influx export all \ + --filter=resourceKind=Bucket \ + --filter=resourceKind=Dashboard \ + --filter=labelName=Foo +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/export/stack.md b/content/influxdb/v2.5/reference/cli/influx/export/stack.md new file mode 100644 index 000000000..4bfc464e2 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/export/stack.md @@ -0,0 +1,51 @@ +--- +title: influx export stack +description: > + The `influx export stack` command exports all resources associated with a stack as an InfluxDB template. +menu: + influxdb_2_5_ref: + parent: influx export +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/export/stack +related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +--- + +The `influx export stack` command exports all resources associated with a stack as a template. +All `metadata.name` fields remain the same. + +{{% note %}} +To export resources as a template, you must use the **Operator token** created for +the initial InfluxDB user or an **All-Access token**. +For information about creating an All-Access API token, see [Create an API token](/influxdb/v2.5/security/tokens/create-token/). +{{% /note %}} + +## Usage +``` +influx export stack [flags] +``` + +## Flags +| Flag | | Description | Input Type | {{< cli/mapped >}} | +|:-----|:------------------|:---------------------------------------------------------------------------------|:-----------|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-f` | `--file` | Template output file. Defaults to stdout. Use `.yml` or `.json` file extensions. | string | | +| `-h` | `--help` | Help for the `export stack` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-o` | `--org` | Organization name that owns the resources (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID that owns the resources (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Export a stack as a template +```sh +influx export stack $STACK_ID +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/help/_index.md b/content/influxdb/v2.5/reference/cli/influx/help/_index.md new file mode 100644 index 000000000..31d718141 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/help/_index.md @@ -0,0 +1,25 @@ +--- +title: influx help +description: The `influx help` command provides help for any command in the `influx` command line interface. +menu: + influxdb_2_5_ref: + name: influx help + parent: influx +weight: 101 +related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx help` command provides help for any command in the `influx` command line interface. + +## Usage +``` +influx help [command] [flags] +``` + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `help` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/org/_index.md b/content/influxdb/v2.5/reference/cli/influx/org/_index.md new file mode 100644 index 000000000..5c92acb82 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/_index.md @@ -0,0 +1,40 @@ +--- +title: influx org +description: The `influx org` command and its subcommands manage organization information in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org + parent: influx +weight: 101 +influxdb/v2.5/tags: [organizations] +cascade: + related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx org` command and its subcommands manage organization information in InfluxDB. 
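+
+For example, a common workflow combines the subcommands listed below: create an
+organization, then list organizations to confirm it exists (a sketch; the organization
+name is a placeholder):
+
+```sh
+influx org create --name example-org
+influx org list
+```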
+ +## Usage +``` +influx org [flags] +influx org [command] +``` + +#### Command aliases +`org`, `organization` + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [create](/influxdb/v2.5/reference/cli/influx/org/create) | Create an organization | +| [delete](/influxdb/v2.5/reference/cli/influx/org/delete) | Delete an organization | +| [list](/influxdb/v2.5/reference/cli/influx/org/list) | List organizations | +| [members](/influxdb/v2.5/reference/cli/influx/org/members) | Organization membership commands | +| [update](/influxdb/v2.5/reference/cli/influx/org/update) | Update an organization | + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `org` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/org/create.md b/content/influxdb/v2.5/reference/cli/influx/org/create.md new file mode 100644 index 000000000..3de3ecc97 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/create.md @@ -0,0 +1,47 @@ +--- +title: influx org create +description: The `influx org create` creates an organization in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org create + parent: influx org +weight: 201 +--- + +The `influx org create` creates an organization in InfluxDB. + +## Usage +``` +influx org create [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | Description of the organization | | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | ({{< req >}}) Organization name | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Create an organization +```sh +influx org create --name example-org +``` + +##### Create an organization with a description +```sh +influx org create \ + --name example-org \ + --description "Example organization description" +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/org/delete.md b/content/influxdb/v2.5/reference/cli/influx/org/delete.md new file mode 100644 index 000000000..01c295582 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/delete.md @@ -0,0 +1,39 @@ +--- +title: influx org delete +description: The `influx org delete` command deletes an organization in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org delete + parent: influx org +weight: 201 +--- + +The `influx org delete` command deletes an organization in InfluxDB. 
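+
+Because deletion requires the organization ID, you might first look the ID up by name
+with `influx org list` (a sketch; the name and ID below are placeholders):
+
+```sh
+# Find the organization ID, then delete the organization
+influx org list --name example-org
+influx org delete --id 0Xx0oox00XXoxxoo1
+```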
+ +## Usage +``` +influx org delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | ({{< req >}}) Organization ID | string | `INFLUX_ORG_ID` | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete an organization +```sh +influx org delete --id 0Xx0oox00XXoxxoo1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/org/list.md b/content/influxdb/v2.5/reference/cli/influx/org/list.md new file mode 100644 index 000000000..96718ad8f --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/list.md @@ -0,0 +1,59 @@ +--- +title: influx org list +description: The `influx org list` lists and searches for organizations in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org list + parent: influx org +weight: 201 +aliases: + - /influxdb/v2.5/reference/influx/org/find +--- + +The `influx org list` lists and searches for organizations in InfluxDB. + +## Usage +``` +influx org list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | |
+| `-i` | `--id` | Organization ID | string | `INFLUX_ORG_ID` |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-n` | `--name` | Organization name | string | `INFLUX_ORG` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+- [List all organizations](#list-all-organizations)
+- [List a specific organization by name](#list-a-specific-organization-by-name)
+- [List a specific organization by ID](#list-a-specific-organization-by-id)
+
+##### List all organizations
+```sh
+influx org list
+```
+
+##### List a specific organization by name
+```sh
+influx org list --name example-org
+```
+
+##### List a specific organization by ID
+```sh
+influx org list --id 0Xx0oox00XXoxxoo1
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/org/members/_index.md b/content/influxdb/v2.5/reference/cli/influx/org/members/_index.md
new file mode 100644
index 000000000..e407daa25
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/org/members/_index.md
@@ -0,0 +1,30 @@
+---
+title: influx org members
+description: The `influx org members` command and its subcommands manage organization members in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx org members
+    parent: influx org
+weight: 201
+influxdb/v2.5/tags: [members, organizations]
+---
+
+The `influx org members` command and its subcommands manage organization members in InfluxDB.
+
+## Usage
+```
+influx org members [flags]
+influx org members [command]
+```
+
+## Subcommands
+| Subcommand | Description |
+|:---------- |:----------- |
+| [add](/influxdb/v2.5/reference/cli/influx/org/members/add) | Add organization member |
+| [list](/influxdb/v2.5/reference/cli/influx/org/members/list) | List organization members |
+| [remove](/influxdb/v2.5/reference/cli/influx/org/members/remove) | Remove organization member |
+
+## Flags
+| Flag | | Description |
+|:---- |:--- |:----------- |
+| `-h` | `--help` | Help for the `members` command |
diff --git a/content/influxdb/v2.5/reference/cli/influx/org/members/add.md b/content/influxdb/v2.5/reference/cli/influx/org/members/add.md
new file mode 100644
index 000000000..125283832
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/org/members/add.md
@@ -0,0 +1,53 @@
+---
+title: influx org members add
+description: The `influx org members add` command adds a member to an organization in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx org members add
+    parent: influx org members
+weight: 301
+updated_in: CLI v2.4.0
+metadata: [influx CLI 2.0.0+, InfluxDB OSS only]
+related:
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+  - /influxdb/v2.5/organizations/members/add-member/
+---
+
+The `influx org members add` command adds a member to an organization in InfluxDB. 
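+
+The `--member` flag takes a user ID. As a sketch (the user name, user ID, and
+organization name below are placeholders), you might look up the ID with
+`influx user list` before adding the member:
+
+```sh
+# Find the user ID, then add that user to the organization
+influx user list --name example-user
+influx org members add --member 00x0oo0X0xxxo000 --name example-org
+```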
+ +## Usage +``` +influx org members add [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :-------------- | :--------------------------------------------------------- | :--------: | :------------------- | +| `-h` | `--help` | Help for the `add` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| `-i` | `--id` | Organization ID | string | `INFLUX_ORG_ID` | +| `-m` | `--member` | User ID | string | | +| `-n` | `--name` | Organization name | string | `INFLUX_ORG` | +| | ` --owner` | Set new member as an owner | | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Add a member to an organization +```sh +influx org members add \ + --member 00x0oo0X0xxxo000 \ + --name example-org +``` + +##### Add a member to an organization and make them an owner +```sh +influx org members add \ + --member 00x0oo0X0xxxo000 \ + --name example-org \ + --owner +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/org/members/list.md b/content/influxdb/v2.5/reference/cli/influx/org/members/list.md new file mode 100644 index 000000000..90d5f6aaa --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/members/list.md @@ -0,0 +1,39 @@ +--- +title: influx org members list +description: The `influx org members list` command lists members within an organization in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org members list + parent: influx org members +weight: 301 +--- + +The `influx org members list` command lists members within an organization in InfluxDB. + +## Usage +``` +influx org members list [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:-----------------|:-----------------------------------------------------------|:----------:|:----------------------| +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | Organization ID | string | `INFLUX_ORG_ID` | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Organization name | string | `INFLUX_ORG` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List members of an organization +```sh +influx org members list \ + --name example-org +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/org/members/remove.md b/content/influxdb/v2.5/reference/cli/influx/org/members/remove.md new file mode 100644 index 000000000..accdd030c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/members/remove.md @@ -0,0 +1,40 @@ +--- +title: influx org members remove +description: The `influx org members remove` command removes a member from an organization in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org members remove + parent: influx org members +weight: 301 +metadata: [influx CLI 2.0.0+, InfluxDB OSS only] +--- + +The `influx org members remove` command removes a member from an organization in InfluxDB. 
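+
+Because removal requires the member's user ID, a typical sequence (sketched below with
+placeholder values) is to list the organization's members first:
+
+```sh
+# List members to find the user ID, then remove that member
+influx org members list --name example-org
+influx org members remove --member 00x0oo0X0xxxo000 --name example-org
+```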
+ +## Usage +``` +influx org members remove [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:----------------|:-----------------------------------------------------------|:----------:|:---------------------| +| `-h` | `--help` | Help for the `remove` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | Organization ID | string | `INFLUX_ORG_ID` | +| `-m` | `--member` | Member ID | string | | +| `-n` | `--name` | Organization name | string | `INFLUX_ORG` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Remove a member from an organization +```sh +influx org members remove \ + --member 00x0oo0X0xxxo000 \ + --name example-org +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/org/update.md b/content/influxdb/v2.5/reference/cli/influx/org/update.md new file mode 100644 index 000000000..0facaeb78 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/org/update.md @@ -0,0 +1,50 @@ +--- +title: influx org update +description: The `influx org update` command updates information related to organizations in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx org update + parent: influx org +weight: 201 +--- + +The `influx org update` command updates information related to organizations in InfluxDB. + +## Usage +``` +influx org update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:-------------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | New description for the organization | string | `INFLUX_ORG_DESCRIPTION` | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | |
+| `-i` | `--id` | ({{< req >}}) Organization ID | string | `INFLUX_ORG_ID` |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-n` | `--name` | New organization name | string | `INFLUX_ORG` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+##### Update the name of an organization
+```sh
+influx org update \
+  --id 0Xx0oox00XXoxxoo1 \
+  --name new-org-name
+```
+
+##### Update the description of an organization
+```sh
+influx org update \
+  --id 0Xx0oox00XXoxxoo1 \
+  --description "New example organization description"
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/ping/_index.md b/content/influxdb/v2.5/reference/cli/influx/ping/_index.md
new file mode 100644
index 000000000..e633bcd07
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/ping/_index.md
@@ -0,0 +1,35 @@
+---
+title: influx ping
+description: >
+  The `influx ping` command checks the health of a running InfluxDB instance by
+  querying the `/health` endpoint.
+menu:
+  influxdb_2_5_ref:
+    name: influx ping
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [ping, health]
+related:
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+---
+
+The `influx ping` command checks the health of a running InfluxDB instance by
+querying the `/health` endpoint.
+It does not require an API token.
+
+## Usage
+```
+influx ping [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:----------------------------------------------------------------------|:-----------|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `ping` command | | |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. | string | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
diff --git a/content/influxdb/v2.5/reference/cli/influx/query/_index.md b/content/influxdb/v2.5/reference/cli/influx/query/_index.md
new file mode 100644
index 000000000..b2bebf4e0
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/query/_index.md
@@ -0,0 +1,87 @@
+---
+title: influx query
+description: >
+  The `influx query` command executes a literal Flux query provided as a string
+  or a literal Flux query contained in a file. 
+menu: + influxdb_2_5_ref: + name: influx query + parent: influx +weight: 101 +influxdb/v2.5/tags: [query] +related: + - /influxdb/v2.5/query-data/ + - /influxdb/v2.5/query-data/execute-queries/influx-query/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +updated_in: CLI v2.0.5 +--- + +The `influx query` command executes a literal Flux query provided as a string +or a literal Flux query contained in a file. + +## Usage +``` +influx query [query literal] [flags] +``` + +{{% note %}} +#### Remove unnecessary columns in large datasets +When using the `influx query` command to query and download large datasets, +drop columns such as `_start` and `_stop` to optimize the download file size. + +```js +// ... + |> drop(columns: ["_start", "_stop"]) +``` +{{% /note %}} + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-f` | `--file` | Path to Flux script file | string | | +| `-h` | `--help` | Help for the `query` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-p` | `--profilers` | Flux query profilers to enable (comma-separated) | string | | +| `-r` | `--raw` | Output raw query results (annotated CSV) | | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [Query InfluxDB with a Flux string](#query-influxdb-with-a-flux-string) +- [Query InfluxDB using a Flux file](#query-influxdb-with-a-flux-file) +- [Query InfluxDB and return annotated CSV](#query-influxdb-and-return-annotated-csv) +- [Query InfluxDB and append query profile data to results](#query-influxdb-and-append-query-profile-data-to-results) + +##### Query InfluxDB with a Flux string +```sh +influx query 'from(bucket:"example-bucket") |> range(start:-1m)' +``` + +##### Query InfluxDB with a Flux file +```sh +influx query --file /path/to/example-query.flux +``` + +##### Query InfluxDB and return annotated CSV +```sh +influx query 'from(bucket:"example-bucket") |> range(start:-1m)' --raw +``` + +##### Query InfluxDB and append query profile data to results +_For more information about profilers, see [Flux profilers](/{{< latest "flux" >}}/stdlib/profiler/#available-profilers)._ + +```sh +influx query \ + --profilers operator,query \ + 'from(bucket:"example-bucket") |> range(start:-1m)' +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/remote/_index.md b/content/influxdb/v2.5/reference/cli/influx/remote/_index.md new file mode 100644 index 000000000..7765ebde3 --- /dev/null +++ 
b/content/influxdb/v2.5/reference/cli/influx/remote/_index.md
@@ -0,0 +1,39 @@
+---
+title: influx remote
+description: Manage remote InfluxDB connections for replicating data.
+menu:
+  influxdb_2_5_ref:
+    name: influx remote
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [write, replication]
+related:
+  - /influxdb/v2.5/reference/cli/influx/replication
+  - /influxdb/v2.5/write-data/replication
+---
+
+{{% cloud %}}
+Configure InfluxDB Edge Data Replication remotes and replication streams to replicate data from InfluxDB OSS to remote buckets on InfluxDB Cloud, InfluxDB Enterprise, or another InfluxDB OSS instance. Currently, you cannot configure remotes and replication streams on InfluxDB Cloud.
+{{% /cloud %}}
+
+Use the `influx remote` command to manage connections to remote instances of InfluxDB.
+Remote connections are used to replicate data on write at the bucket level.
+
+## Usage
+```
+influx remote [command options] [arguments...]
+```
+
+## Subcommands
+
+| Subcommand | Description |
+|:---------------------------------------------------------------|:--------------------------------|
+| [`create`](/influxdb/v2.5/reference/cli/influx/remote/create) | Create a new remote connection |
+| [`delete`](/influxdb/v2.5/reference/cli/influx/remote/delete) | Delete a remote connection |
+| [`list`](/influxdb/v2.5/reference/cli/influx/remote/list) | List remote connections |
+| [`update`](/influxdb/v2.5/reference/cli/influx/remote/update) | Update a remote connection |
+
+## Flags
+| Flag | | Description |
+|:-----|:---------|:------------------------------|
+| `-h` | `--help` | Help for the `remote` command |
diff --git a/content/influxdb/v2.5/reference/cli/influx/remote/create.md b/content/influxdb/v2.5/reference/cli/influx/remote/create.md
new file mode 100644
index 000000000..64b208c61
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/remote/create.md
@@ -0,0 +1,60 @@
+---
+title: influx remote create
+description: Create a new remote InfluxDB connection for replicating data.
+menu:
+  influxdb_2_5_ref:
+    name: influx remote create
+    parent: influx remote
+weight: 101
+influxdb/v2.5/tags: [write, replication]
+related:
+  - /influxdb/v2.5/reference/cli/influx/replication
+---
+
+{{% cloud %}}
+Replication remotes and replication streams can only be configured for InfluxDB OSS.
+{{% /cloud %}}
+
+The `influx remote create` command creates a new remote InfluxDB connection for replicating data.
+
+## Usage
+```
+influx remote create [command options] [arguments...]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :--------------------- | :--------------------------------------------------------------------- | :--------: | :-------------------- |
+| | `--org-id` | Organization ID | string | `INFLUX_ORG_ID` |
+| `-o` | `--org` | Organization name | string | `INFLUX_ORG` |
+| `-n` | `--name` | Remote connection name | string | |
+| `-d` | `--description` | Remote connection description | string | |
+| | `--remote-url` | Remote InfluxDB instance URL | string | |
+| | `--remote-api-token` | Remote InfluxDB API token | string | |
+| | `--remote-org-id` | Remote InfluxDB organization ID | string | |
+| | `--allow-insecure-tls` | Allows insecure TLS (self-signed certificates) | | |
+| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+### Create a new remote with InfluxDB Cloud
+
+```sh
+influx remote create \
+  --name myremote \
+  --org-id \
+  --token \
+  --remote-url \
+  --remote-api-token \
+  --remote-org-id
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/remote/delete.md b/content/influxdb/v2.5/reference/cli/influx/remote/delete.md
new file mode 100644
index 000000000..35337e2e8
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/remote/delete.md
@@ -0,0 +1,55 @@
+---
+title: influx remote delete
+description: Delete remote InfluxDB connections used for replicating data.
+menu:
+  influxdb_2_5_ref:
+    name: influx remote delete
+    parent: influx remote
+weight: 102
+influxdb/v2.5/tags: [write, replication]
+related:
+  - /influxdb/v2.5/reference/cli/influx/replication
+---
+
+{{% cloud %}}
+Replication remotes and replication streams can only be configured for InfluxDB OSS.
+{{% /cloud %}}
+
+The `influx remote delete` command deletes an existing remote InfluxDB connection used for replication.
+
+## Usage
+```
+influx remote delete [command options] [arguments...]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :---------------- | :--------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-i` | `--id` | Remote connection ID to delete | | |
+| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` |
+
+## Examples
+{{< cli/influx-creds-note >}}
+
+### Delete a remote
+1. Use `influx remote list` to get the ID for the remote you want to delete.
+    ```sh
+    $ influx remote list --org-id --token
+    ID              Name        Org ID
+    0ooxX0xxXo0x    myremote    [...]
+    ```
+2. Use the following command to delete the remote:
+    ```sh
+    influx remote delete \
+    --org-id \
+    --token \
+    --id 0ooxX0xxXo0x
+    ```
diff --git a/content/influxdb/v2.5/reference/cli/influx/remote/list.md b/content/influxdb/v2.5/reference/cli/influx/remote/list.md
new file mode 100644
index 000000000..4d053e385
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/remote/list.md
@@ -0,0 +1,57 @@
+---
+title: influx remote list
+description: List remote InfluxDB connections used for replicating data.
+menu:
+  influxdb_2_5_ref:
+    name: influx remote list
+    parent: influx remote
+weight: 102
+influxdb/v2.5/tags: [write, replication]
+related:
+  - /influxdb/v2.5/reference/cli/influx/replication
+---
+
+{{% cloud %}}
+Replication remotes and replication streams can only be configured for InfluxDB OSS.
+{{% /cloud %}}
+
+The `influx remote list` command lists all remote InfluxDB connections used for replication.
+
+## Usage
+
+```
+influx remote list [command options] [arguments...]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :---------------- | :--------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-n` | `--name` | Filter remote connections by name | | |
+| | `--org-id` | Organization ID | string | `INFLUX_ORG_ID` |
+| `-o` | `--org` | Organization name | string | `INFLUX_ORG` |
+| | `--remote-url` | Filter remote connections by remote URL | string | |
+| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` |
+
+
+## Examples
+{{< cli/influx-creds-note >}}
+
+### List all remotes
+
+```sh
+influx remote list
+```
+
+### List a specific remote by name
+
+```sh
+influx remote list --name example-name
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/remote/update.md b/content/influxdb/v2.5/reference/cli/influx/remote/update.md
new file mode 100644
index 000000000..94c8d1348
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/remote/update.md
@@ -0,0 +1,66 @@
+---
+title: influx remote update
+description: Update remote InfluxDB connections used for replicating data.
+menu:
+  influxdb_2_5_ref:
+    name: influx remote update
+    parent: influx remote
+weight: 102
+influxdb/v2.5/tags: [write, replication]
+related:
+  - /influxdb/v2.5/reference/cli/influx/replication
+---
+
+
+{{% cloud %}}
+Replication remotes and replication streams can only be configured for InfluxDB OSS.
+{{% /cloud %}}
+
+The `influx remote update` command updates an existing InfluxDB remote connection used for replicating data.
+
+## Usage
+```
+influx remote update [command options] [arguments...]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :--------------------- | :--------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-i` | `--id` | Remote connection ID to update | string | |
+| `-n` | `--name` | New name for the remote connection | string | |
+| `-d` | `--description` | New remote connection description | string | |
+| | `--remote-url` | New remote InfluxDB URL | string | |
+| | `--remote-api-token` | New remote InfluxDB API token | string | |
+| | `--remote-org-id` | New remote organization ID | string | |
+| | `--allow-insecure-tls` | Allows insecure TLS connections | | |
+| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` |
+
+## Example
+{{< cli/influx-creds-note >}}
+
+### Update a remote
+1. Use `influx remote list` to get the ID for the remote you want to update.
+    ```sh
+    $ influx remote list
+    ID              Name        Org ID
+    0ooxX0xxXo0x    myremote    [...]
+    ```
+2. Use the following command to update the remote:
+    ```sh
+    influx remote update \
+      --id 0ooxX0xxXo0x \
+      --name new-example-name \
+      --description new-example-description \
+      --remote-url http://new-example-url.com \
+      --remote-api-token myN3wS3crE7t0k3n== \
+      --remote-org-id new-example-org-id
+    ```
+
diff --git a/content/influxdb/v2.5/reference/cli/influx/repl/_index.md b/content/influxdb/v2.5/reference/cli/influx/repl/_index.md
new file mode 100644
index 000000000..251f01666
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/repl/_index.md
@@ -0,0 +1,44 @@
+---
+title: influx repl
+description: >
+  The `influx repl` command opens an interactive Read-Eval-Print Loop (REPL)
+  from which you can run Flux commands.
+influxdb/v2.5/tags: [query]
+related:
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+---
+
+{{% warn %}}
+#### Removed in InfluxDB 2.0 beta-16
+The `influx repl` command was removed in **InfluxDB 2.0 beta-16**.
+To use the Flux REPL, build the REPL from source.
+For more information, see the [Flux GitHub repository](https://github.com/influxdata/flux/#readme).
+{{% /warn %}}
+
+The `influx repl` command opens an interactive Read-Eval-Print Loop (REPL)
+from which you can run Flux commands.
+
+## Usage
+```
+influx repl [flags]
+```
+
+{{% note %}}
+Use **ctrl + d** to exit the REPL.
+{{% /note %}}
+
+To use the Flux REPL, you must first authenticate with a [token](/influxdb/v2.5/security/tokens/view-tokens/).
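+
+For example, you can provide the token with the `-t`/`--token` flag or the
+`INFLUX_TOKEN` environment variable described below (the organization and token
+values here are placeholders):
+
+```sh
+# Provide the token with the --token flag...
+influx repl --org example-org --token mySuP3rS3cr3tT0k3n
+
+# ...or with the INFLUX_TOKEN environment variable
+export INFLUX_TOKEN=mySuP3rS3cr3tT0k3n
+influx repl --org example-org
+```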
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:-----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `repl` command | | |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. | string | |
+| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
+| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
diff --git a/content/influxdb/v2.5/reference/cli/influx/replication/_index.md b/content/influxdb/v2.5/reference/cli/influx/replication/_index.md
new file mode 100644
index 000000000..64a3a5f48
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/replication/_index.md
@@ -0,0 +1,35 @@
+---
+title: influx replication
+description: Use the `influx` CLI to manage InfluxDB replication streams.
+menu:
+  influxdb_2_5_ref:
+    name: influx replication
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [write, replication]
+cascade:
+  related:
+    - /influxdb/v2.5/reference/cli/influx/remote
+    - /influxdb/v2.5/write-data/replication/replicate-data/
+---
+
+The `influx replication` command and its subcommands manage InfluxDB Edge Data Replication.
+
+## Usage
+```
+influx replication [command options] [arguments...]
+```
+
+## Subcommands
+| Subcommand | Description |
+| :---------------------------------------------------------------- | :---------------------------------------- |
+| [create](/influxdb/v2.5/reference/cli/influx/replication/create) | Create a new replication stream |
+| [delete](/influxdb/v2.5/reference/cli/influx/replication/delete) | Delete a replication stream |
+| [list](/influxdb/v2.5/reference/cli/influx/replication/list) | List all replication streams and metrics |
+| [update](/influxdb/v2.5/reference/cli/influx/replication/update) | Update a replication stream |
+
+## Flags
+| Flag | | Description |
+| :--- | :------- | :--------------------------------- |
+| `-h` | `--help` | Help for the `replication` command |
+
diff --git a/content/influxdb/v2.5/reference/cli/influx/replication/create.md b/content/influxdb/v2.5/reference/cli/influx/replication/create.md
new file mode 100644
index 000000000..58cb78845
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/replication/create.md
@@ -0,0 +1,69 @@
+---
+title: influx replication create
+description: Create a new InfluxDB replication stream.
+menu:
+  influxdb_2_5_ref:
+    name: influx replication create
+    parent: influx replication
+weight: 101
+influxdb/v2.5/tags: [write]
+---
+
+{{% cloud %}}
+Replication remotes and replication streams can only be configured for InfluxDB OSS.
+{{% /cloud %}}
+
+
+The `influx replication create` command creates a new InfluxDB replication stream.
+
+## Usage
+```
+influx replication create [command options] [arguments...]
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :----------------------------- | :--------------------------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-n` | `--name` | Replication stream name | string | |
+| `-d` | `--description` | Replication stream description | string | |
+| | `--org-id` | Local organization ID | string | `INFLUX_ORG_ID` |
+| `-o` | `--org` | Local organization name | string | `INFLUX_ORG` |
+| | `--remote-id` | Remote connection ID to replicate data to | string | |
+| | `--local-bucket-id` | Local bucket ID to replicate data from | string | |
+| | `--remote-bucket` | Remote bucket name to replicate data to (mutually exclusive with `--remote-bucket-id`) | string | |
+| | `--remote-bucket-id` | Remote bucket ID to replicate data to (mutually exclusive with `--remote-bucket`) | string | |
+| | `--max-queue-bytes` | Max queue size in bytes (default: `67108860`) | integer | |
+| | `--drop-non-retryable-data` | Drop data when a non-retryable error is encountered | | |
+| | `--no-drop-non-retryable-data` | Do not drop data when a non-retryable error is encountered | | |
+| | `--max-age` | Specify a maximum age (in seconds) for data before it is dropped | integer | |
+| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` |
+
+
+## Examples
+{{< cli/influx-creds-note >}}
+
+### Create a replication stream
+
+1. [Create a remote connection](/influxdb/v2.5/reference/cli/influx/remote/create/), if you haven't already.
+2. Use `influx remote list` to get the ID for the remote you want to replicate data to.
+    ```sh
+    $ influx remote list
+    ID              Name        Org ID
+    0ooxX0xxXo0x    myremote    [...]
+    ```
+3. Create the replication:
+    ```sh
+    influx replication create \
+      --name myreplication \
+      --local-bucket-id example-local-bucket-id \
+      --remote-bucket example-remote-bucket \
+      --remote-id 0ooxX0xxXo0x
+    ```
diff --git a/content/influxdb/v2.5/reference/cli/influx/replication/delete.md b/content/influxdb/v2.5/reference/cli/influx/replication/delete.md
new file mode 100644
index 000000000..46a7d984d
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/replication/delete.md
@@ -0,0 +1,50 @@
+---
+title: influx replication delete
+description: Delete an InfluxDB replication stream.
+menu:
+  influxdb_2_5_ref:
+    name: influx replication delete
+    parent: influx replication
+weight: 102
+influxdb/v2.5/tags: [write, replication]
+---
+
+{{% cloud %}}
+Replication remotes and replication streams can only be configured for InfluxDB OSS.
+{{% /cloud %}}
+
+The `influx replication delete` command deletes an InfluxDB replication stream.
+
+## Usage
+
+```
+influx replication delete [command options] [arguments...]
+``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :-------------------------------------------------------------------- | :--------: | :-------------------- | +| `-i` | `--id` | Replication stream ID to delete | string | | +| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--http-debug` | Inspect communication with InfluxDB servers | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` | + +## Examples +{{< cli/influx-creds-note >}} + +### Delete a replication +1. Use `influx replication list` to get the ID for the replication you want to delete. + ```sh + $ influx replication list + ID Name Org ID + 0ooxX0xxXo0x myreplication [...] + ``` +2. Use the following command to delete the replication: + ```sh + influx replication delete --id 0ooxX0xxXo0x + ``` diff --git a/content/influxdb/v2.5/reference/cli/influx/replication/list.md b/content/influxdb/v2.5/reference/cli/influx/replication/list.md new file mode 100644 index 000000000..cdf0fb295 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/replication/list.md @@ -0,0 +1,54 @@ +--- +title: influx replication list +description: List InfluxDB replication streams and corresponding metrics. +menu: + influxdb_2_5_ref: + name: influx replication list + parent: influx replication +weight: 102 +influxdb/v2.5/tags: [write, replication] +--- + +{{% cloud %}} +Replication remotes and replication streams can only be configured for InfluxDB OSS. +{{% /cloud %}} + + +The `influx replication list` command lists all InfluxDB replication streams and their corresponding metrics. + +## Usage +``` +influx replication list [command options] [arguments...] 
+``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :------------------ | :-------------------------------------------------------------------- | :--------: | :-------------------- | +| `-n` | `--name` | Filter replication streams by name | string | | +| | `--org-id` | Local organization ID | string | `INFLUX_ORG_ID` | +| `-o` | `--org` | Local organization name | string | `INFLUX_ORG` | +| | `--remote-id` | Filter replication streams by remote connection ID | string | | +| | `--local-bucket-id` | Filter replication streams by local bucket | string | | +| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--http-debug` | Inspect communication with InfluxDB servers | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` | + +## Examples +{{< cli/influx-creds-note >}} + +### List all replication streams + +```sh +influx replication list --org-id --token +``` + +### List a replication stream by name + +```sh +influx replication list --name example-replication-name +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/replication/update.md b/content/influxdb/v2.5/reference/cli/influx/replication/update.md new file mode 100644 index 000000000..5ed62210c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/replication/update.md @@ -0,0 +1,65 @@ +--- +title: influx replication update +description: Update InfluxDB replication streams. +menu: + influxdb_2_5_ref: + name: influx replication update + parent: influx replication +weight: 102 +influxdb/v2.5/tags: [write, replication] +--- + +{{% cloud %}} +Replication remotes and replication streams can only be configured for InfluxDB OSS. +{{% /cloud %}} + +The `influx replication update` command updates an InfluxDB replication stream. + +## Usage +``` +influx replication update [command options] [arguments...] 
+```
+
+## Flags
+
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :----------------------------- | :--------------------------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-i` | `--id` | Replication stream ID to update | string | |
+| `-n` | `--name` | New replication stream name | string | |
+| `-d` | `--description` | New replication stream description | string | |
+| | `--remote-id` | New remote connection ID to send data to | string | |
+| | `--remote-bucket` | Remote bucket name to replicate data to (mutually exclusive with `--remote-bucket-id`) | string | |
+| | `--remote-bucket-id` | Remote bucket ID to replicate data to (mutually exclusive with `--remote-bucket`) | string | |
+| | `--max-queue-bytes` | New max queue size in bytes (default: `0`) | integer | |
+| | `--drop-non-retryable-data` | Drop data when a non-retryable error is encountered | | |
+| | `--no-drop-non-retryable-data` | Do not drop data when a non-retryable error is encountered | | |
+| | `--max-age` | Specify a maximum age (in seconds) for data before it is dropped | integer | |
+| | `--host` | InfluxDB HTTP address (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| `-t` | `--token` | InfluxDB API token | string | `INFLUX_TOKEN` |
+
+## Example
+{{< cli/influx-creds-note >}}
+
+### Update a replication
+1. Use `influx replication list` to get the ID for the replication you want to update.
+    ```sh
+    $ influx replication list
+    ID              Name            Org ID
+    0ooxX0xxXo0x    myreplication   [...]
+    ```
+2. Use the following command to update the replication:
+    ```sh
+    influx replication update \
+      --id 0ooxX0xxXo0x \
+      --name new-replication-name \
+      --description new-replication-description \
+      --remote-bucket new-remote-bucket-name
+    ```
+
diff --git a/content/influxdb/v2.5/reference/cli/influx/restore/index.md b/content/influxdb/v2.5/reference/cli/influx/restore/index.md
new file mode 100644
index 000000000..df6734633
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/restore/index.md
@@ -0,0 +1,96 @@
+---
+title: influx restore
+description: The `influx restore` command restores backup data and metadata from an InfluxDB backup directory.
+influxdb/v2.5/tags: [restore] +menu: + influxdb_2_5_ref: + parent: influx +weight: 101 +aliases: + - /influxdb/v2.5/reference/cli/influxd/restore/ + - /influxdb/v2.5/administration/backup_and_restore/ +related: + - /influxdb/v2.5/backup-restore/restore/ + - /influxdb/v2.5/reference/cli/influx/backup/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +updated_in: CLI v2.0.7 +--- + +The `influx restore` command restores backup data and metadata from an InfluxDB OSS backup directory. + +### The restore process +When restoring data from a backup file set, InfluxDB temporarily moves existing +data and metadata while `restore` runs. +After `restore` completes, the temporary data is deleted. +If the restore process fails, InfluxDB preserves the data in the temporary location. + +_For information about recovering from a failed restore process, see +[Restore data](/influxdb/v2.5/backup-restore/restore/#recover-from-a-failed-restore)._ + +{{% note %}} +#### Cannot restore to existing buckets +The `influx restore` command cannot restore data to existing buckets. +Use the `--new-bucket` flag to create a bucket with a new name and restore data into it. +To restore data and retain bucket names, [delete existing buckets](/influxdb/v2.5/organizations/buckets/delete-bucket/) +and then begin the restore process. +{{% /note %}} + +## Usage + +``` +influx restore [flags] +``` + +## Flags + +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| `-b` | `--bucket` | Name of the bucket to restore (mutually exclusive with `--bucket-id`) | string | | +| | `--bucket-id` | ID of the bucket to restore (mutually exclusive with `--bucket`) | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--full` | Fully restore and replace all data on server | | | +| `-h` | `--help` | Help for the `restore` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--new-bucket` | Name of the bucket to restore to | string | | +| | `--new-org` | Name of the organization to restore to | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [Restore backup data](#restore-backup-data) +- [Restore backup data for a specific bucket into a new bucket](#restore-backup-data-for-a-specific-bucket-into-a-new-bucket) +- [Restore and replace all data](#restore-and-replace-all-data) + +##### Restore backup data +```sh +influx restore /path/to/backup/dir/ +``` + +##### Restore backup data for a specific bucket into a new bucket +```sh +influx restore \ + --bucket example-bucket \ + --new-bucket new-example-bucket \ + /path/to/backup/dir/ +``` + +##### Restore and replace all data +{{% note %}} +`influx restore --full` restores all time series data _and_ InfluxDB key-value +data such as tokens, dashboards, users, etc. +{{% /note %}} + +```sh +influx restore --full /path/to/backup/dir/ +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/_index.md b/content/influxdb/v2.5/reference/cli/influx/scripts/_index.md new file mode 100644 index 000000000..ecce459c3 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/scripts/_index.md @@ -0,0 +1,38 @@ +--- +title: influx scripts +description: The `influx scripts` command and its subcommands manage invokable scripts in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx scripts + parent: influx +weight: 101 +influxdb/v2.5/tags: [scripts] +cascade: + related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + - /influxdb/cloud/api-guide/api-invokable-scripts/ + metadata: [influx CLI 2.4.0+, InfluxDB Cloud only] +--- + +The `influx scripts` command and its subcommands manage [invokable scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) in InfluxDB. + +### Usage +``` +influx scripts [command] +``` + +### Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [create](/influxdb/v2.5/reference/cli/influx/scripts/create) | Create script | +| [delete](/influxdb/v2.5/reference/cli/influx/scripts/delete) | Delete script | +| [invoke](/influxdb/v2.5/reference/cli/influx/scripts/invoke) | Invoke script | +| [list](/influxdb/v2.5/reference/cli/influx/scripts/list) | List scripts | +| [retrieve](/influxdb/v2.5/reference/cli/influx/scripts/retrieve) | Retrieve script | +| [update](/influxdb/v2.5/reference/cli/influx/scripts/update) | Update script | + +### Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `scripts` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/create.md b/content/influxdb/v2.5/reference/cli/influx/scripts/create.md new file mode 100644 index 000000000..eeaf21bf6 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/scripts/create.md @@ -0,0 +1,64 @@ +--- +title: influx scripts create +description: The `influx scripts create` command creates an invokable script in InfluxDB. 
+menu:
+  influxdb_2_5_ref:
+    name: influx scripts create
+    parent: influx scripts
+weight: 201
+---
+
+The `influx scripts create` command creates an invokable script in InfluxDB.
+
+## Usage
+```
+influx scripts create [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+| :--- | :---------------- | :--------------------------------------------------------------------- | :--------: | :-------------------- |
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-d` | `--description` | ({{< req >}}) Purpose or functionality of the script | string | |
+| `-f` | `--file` | Path to file containing the script to be executed | string | |
+| `-h` | `--help` | Help for the `create` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-l` | `--language` | ({{< req >}}) Language the script is written in | string | |
+| `-n` | `--name` | ({{< req >}}) Script name | string | |
+| `-s` | `--script` | Contents of the script to be executed | string | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+##### Create a script using raw Flux
+```sh
+export FLUX_SCRIPT='
+  from(bucket: "example-bucket")
+    |> range(start: -10h)
+    |> filter(fn: (r) => r._measurement == "m")
+    |> aggregateWindow(every: 1h, fn: mean)
+    |> to(bucket: "default-ds-1d", org: "my-org")
+'
+
+influx scripts create \
+  -n "example-script" \
+  -d "a simple example" \
+  -l "flux" \
+  -s "$FLUX_SCRIPT"
+```
+
+##### Create a script from a file
+```sh
+influx scripts create \
+  -n "example-script" \
+  -d "a simple example" \
+  -l "flux" \
+  -f /path/to/example-script.flux
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/delete.md b/content/influxdb/v2.5/reference/cli/influx/scripts/delete.md
new file mode 100644
index 000000000..8a876bf19
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/scripts/delete.md
@@ -0,0 +1,41 @@
+---
+title: influx scripts delete
+description: The `influx scripts delete` command deletes an invokable script in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx scripts delete
+    parent: influx scripts
+weight: 201
+---
+
+The `influx scripts delete` command deletes an invokable script in InfluxDB.
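+
+For example, if you don't know the ID of the script you want to delete, you can
+look it up first with [`influx scripts list`](/influxdb/v2.5/reference/cli/influx/scripts/list/):
+
+```sh
+influx scripts list
+```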
+
+## Usage
+```
+influx scripts delete [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:-----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `delete` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-i` | `--scriptID` | ({{< req >}}) Script ID | string | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+##### Delete a script
+```sh
+influx scripts delete -i 0Xx0oox00XXoxxoo1
+```
+
+
diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/invoke.md b/content/influxdb/v2.5/reference/cli/influx/scripts/invoke.md
new file mode 100644
index 000000000..660f5d452
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/scripts/invoke.md
@@ -0,0 +1,51 @@
+---
+title: influx scripts invoke
+description: The `influx scripts invoke` command executes an invokable script in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx scripts invoke
+    parent: influx scripts
+weight: 201
+---
+
+The `influx scripts invoke` command executes an invokable script in InfluxDB.
+
+## Usage
+```
+influx scripts invoke [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:-----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-f` | `--file` | File name containing the script parameters, in JSON | string | |
+| `-h` | `--help` | Help for the `invoke` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-p` | `--params` | JSON string containing script parameters | string | | +| `-i` | `--scriptID` | ({{< req >}}) Script ID | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [Invoke a script](#invoke-a-script) +- [Invoke a script with parameters](#invoke-a-script-with-parameters) + +##### Invoke a script +```sh +influx scripts invoke -i 0Xx0oox00XXoxxoo1 +``` + +##### Invoke a script with parameters +```sh +influx scripts invoke \ + -i 0Xx0oox00XXoxxoo1 \ + -p "{ \"myParameter\": \"example-data\" }" +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/list.md b/content/influxdb/v2.5/reference/cli/influx/scripts/list.md new file mode 100644 index 000000000..78dada5e2 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/scripts/list.md @@ -0,0 +1,48 @@ +--- +title: influx scripts list +description: The `influx scripts list` command lists and searches for invokable scripts in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx scripts list + parent: influx scripts +weight: 201 +--- + +The `influx scripts list` command lists and searches for invokable scripts in InfluxDB. + +## Usage +``` +influx scripts list [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :-------------------------------------------------------------------- | :--------: | :-------------------- | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-l` | `--limit` | Number of scripts to return (default `0`) | integer | | +| `-o` | `--offset` | Pagination offset (default `0`) | integer | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all invokable scripts](#list-all-invokable-scripts) +- [Limit the number of invokable scripts returned to 20](#limit-the-number-of-invokable-scripts-returned-to-20) + +##### List all invokable scripts +```sh +influx scripts list +``` + +##### Limit the number of invokable scripts returned to 20 +```sh +influx scripts list --limit 20 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/retrieve.md b/content/influxdb/v2.5/reference/cli/influx/scripts/retrieve.md new file mode 100644 index 000000000..1ac730648 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/scripts/retrieve.md @@ -0,0 +1,39 @@ +--- +title: influx scripts retrieve +description: The `influx scripts retrieve` command retrieves invokable script information from InfluxDB. 
+menu:
+  influxdb_2_5_ref:
+    name: influx scripts retrieve
+    parent: influx scripts
+weight: 201
+---
+
+The `influx scripts retrieve` command retrieves invokable script information from InfluxDB.
+
+## Usage
+```
+influx scripts retrieve [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:-----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `retrieve` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-i` | `--scriptID` | ({{< req >}}) Script ID | string | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+##### Retrieve a script
+```sh
+influx scripts retrieve -i 0Xx0oox00XXoxxoo1
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/scripts/update.md b/content/influxdb/v2.5/reference/cli/influx/scripts/update.md
new file mode 100644
index 000000000..d5b0935d4
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/scripts/update.md
@@ -0,0 +1,52 @@
+---
+title: influx scripts update
+description: The `influx scripts update` command updates information related to an invokable script in InfluxDB.
+menu:
+  influxdb_2_5_ref:
+    name: influx scripts update
+    parent: influx scripts
+weight: 201
+---
+
+The `influx scripts update` command updates information related to an invokable script in InfluxDB.
+
+## Usage
+```
+influx scripts update [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:-----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-d` | `--description` | New script description | string | |
+| `-h` | `--help` | Help for the `update` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | |
+| `-n` | `--name` | New script name | string | |
+| `-s` | `--script` | New script contents | string | |
+| `-i` | `--scriptID` | ({{< req >}}) Script ID | string | |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+
+## Example
+
+{{< cli/influx-creds-note >}}
+
+##### Update the source code of an invokable script
+```sh
+export UPDATED_FLUX='
+  from(bucket: "example-bucket")
+    |> range(start: -10h)
+    |> filter(fn: (r) => r._measurement == "m")
+    |> aggregateWindow(every: 1h, fn: mean)
+    |> to(bucket: "default-ds-1d", org: "my-org")
+'
+
+influx scripts update \
+  -i 0Xx0oox00XXoxxoo1 \
+  -s "$UPDATED_FLUX"
+```
diff --git a/content/influxdb/v2.5/reference/cli/influx/secret/_index.md b/content/influxdb/v2.5/reference/cli/influx/secret/_index.md
new file mode 100644
index 000000000..b78828c25
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/secret/_index.md
@@ -0,0 +1,36 @@
+---
+title: influx secret
+description: The `influx secret` command manages secrets.
+menu:
+  influxdb_2_5_ref:
+    name: influx secret
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [secrets]
+cascade:
+  related:
+    - /influxdb/v2.5/security/secrets/
+    - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+    - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+  metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+---
+
+The `influx secret` command manages [secrets](/influxdb/v2.5/reference/glossary/#secret).
+
+## Usage
+```
+influx secret [flags]
+influx secret [subcommand]
+```
+
+## Subcommands
+| Subcommand | Description |
+|:---------- |:----------- |
+| [delete](/influxdb/v2.5/reference/cli/influx/secret/delete/) | Delete a secret |
+| [list](/influxdb/v2.5/reference/cli/influx/secret/list/) | List secrets |
+| [update](/influxdb/v2.5/reference/cli/influx/secret/update/) | Add or update a secret |
+
+## Flags
+| Flag | | Description |
+|:---- |:--- |:----------- |
+| `-h` | `--help` | Help for the `secret` command |
diff --git a/content/influxdb/v2.5/reference/cli/influx/secret/delete.md b/content/influxdb/v2.5/reference/cli/influx/secret/delete.md
new file mode 100644
index 000000000..fe7301f5c
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/secret/delete.md
@@ -0,0 +1,42 @@
+---
+title: influx secret delete
+description: The `influx secret delete` command deletes secrets.
+menu:
+  influxdb_2_5_ref:
+    name: influx secret delete
+    parent: influx secret
+weight: 101
+influxdb/v2.5/tags: [secrets]
+---
+
+The `influx secret delete` command deletes secrets.
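+
+For example, to confirm which secret keys exist in your organization before
+deleting one, you can list them first with [`influx secret list`](/influxdb/v2.5/reference/cli/influx/secret/list/):
+
+```sh
+influx secret list
+```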
+ +## Usage +``` +influx secret delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-k` | `--key` | ({{< req >}}) Secret key | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete a secret +```sh +influx secret delete --key EXAMPLE_SECRET_KEY +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/secret/list.md b/content/influxdb/v2.5/reference/cli/influx/secret/list.md new file mode 100644 index 000000000..88221c637 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/secret/list.md @@ -0,0 +1,44 @@ +--- +title: influx secret list +description: The `influx secret list` command lists secret keys. +menu: + influxdb_2_5_ref: + name: influx secret list + parent: influx secret +weight: 101 +influxdb/v2.5/tags: [secrets] +--- + +The `influx secret list` command lists secret keys. + +## Usage +``` +influx secret list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List all secret keys +```sh +influx secret list +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/secret/update.md b/content/influxdb/v2.5/reference/cli/influx/secret/update.md new file mode 100644 index 000000000..adb20b9bb --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/secret/update.md @@ -0,0 +1,61 @@ +--- +title: influx secret update +description: The `influx secret update` command adds and updates secrets. +menu: + influxdb_2_5_ref: + name: influx secret update + parent: influx secret +weight: 101 +influxdb/v2.5/tags: [secrets] +--- + +The `influx secret update` command adds and updates secrets. +Provide the secret key with the `-k` or `--key` flag. +You may also provide the secret value with the `-v` or `--value` flag. +If you do not provide the secret value with the `-v` or `--value` flag, +enter the value when prompted. + +{{% warn %}} +Providing a secret value with the `-v` or `--value` flag may expose the secret +in your command history. +{{% /warn %}} + +## Usage +``` +influx secret update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-k` | `--key` | ({{< req >}}) Secret key | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-v` | `--value` | ({{< req >}}) Secret value | string | | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Add a secret +```sh +influx secret update \ + --key EXAMPLE_KEY \ + --value EXAMPLE_VALUE +``` + +##### Update an existing secret +```sh +influx secret update \ + --key EXAMPLE_KEY \ + --value NEW_EXAMPLE_VALUE +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/server-config/_index.md b/content/influxdb/v2.5/reference/cli/influx/server-config/_index.md new file mode 100644 index 000000000..7e00c18c2 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/server-config/_index.md @@ -0,0 +1,55 @@ +--- +title: influx server-config +description: The `influx server-config` command displays the runtime server configuration. 
+menu:
+  influxdb_2_5_ref:
+    name: influx server-config
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [config]
+cascade:
+  related:
+    - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+    - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+  metadata: [influx CLI 2.3.0+, InfluxDB 2.0.0+]
+aliases:
+  - /influxdb/v2.2/reference/cli/influxd/print-config/
+---
+
+The `influx server-config` command displays the InfluxDB runtime [server configuration](/influxdb/v2.5/reference/config-options/).
+
+{{% note %}}
+To display the server configuration, you must use an [operator token](/influxdb/v2.5/security/tokens/#operator-token).
+{{% /note %}}
+
+## Usage
+```
+influx server-config [flags]
+influx server-config [command]
+```
+
+## Examples
+```sh
+# Show the server configuration.
+influx server-config
+
+# Show the server configuration as YAML.
+influx server-config --yaml
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:------------------|:-----------------------------------------------------------------------|:----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| `-h` | `--help` | Help for the `server-config` command | | |
+| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--http-debug` | Inspect communication with InfluxDB servers | string | |
+| `-i` | `--id` | Organization ID | string | `INFLUX_ORG_ID` |
+| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
+| `-n` | `--name` | Organization name | string | `INFLUX_ORG` |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+| | `--toml` | Output configuration as TOML instead of JSON | | |
+| | `--yaml` | Output configuration as YAML instead of JSON | | |
diff --git a/content/influxdb/v2.5/reference/cli/influx/setup/_index.md b/content/influxdb/v2.5/reference/cli/influx/setup/_index.md
new file mode 100644
index 000000000..98fc19fb6
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influx/setup/_index.md
@@ -0,0 +1,70 @@
+---
+title: influx setup
+description: >
+  The `influx setup` command walks through the initial InfluxDB OSS setup process,
+  creating a default user, organization, and bucket.
+menu:
+  influxdb_2_5_ref:
+    name: influx setup
+    parent: influx
+weight: 101
+influxdb/v2.5/tags: [get-started]
+related:
+  - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
+  - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
+metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+]
+updated_in: CLI v2.0.3
+canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/setup/
+---
+
+The `influx setup` command walks through the initial InfluxDB OSS setup process,
+creating a default user, organization, and bucket.
+
+{{% note %}}
+The **Operator token** created in the InfluxDB setup process has full read and write
+access to all organizations in the database.
+{{% /note %}} + +## Usage +``` +influx setup [flags] +``` + +## Flags +| Flag | | Description | Data type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:---------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| `-b` | `--bucket` | Primary bucket name | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-f` | `--force` | Skip confirmation prompt | | | +| `-h` | `--help` | Help for the `setup` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-o` | `--org` | Primary organization name | string | | +| `-p` | `--password` | Password for primary user | string | | +| `-r` | `--retention` | Duration bucket will retain data (0 is infinite, default is 0) | duration | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | Token for admin user (auto-generated by default) | string | `INFLUX_TOKEN` | +| `-u` | `--username` | Primary username | string | | + +{{% note %}} +Valid `--retention` units are nanoseconds (`ns`), microseconds (`us` or `µs`), +milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), and weeks (`w`). +{{% /note %}} + +## Examples + +##### Start interactive InfluxDB setup +```sh +influx setup +``` + +##### Set up InfluxDB with all required information and skip confirmation +```sh +influx setup \ + --org example-org \ + --bucket example-bucket \ + --username example-user \ + --password ExAmPl3PA55W0rD \ + --force +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/stacks/_index.md b/content/influxdb/v2.5/reference/cli/influx/stacks/_index.md new file mode 100644 index 000000000..f583d35b1 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/stacks/_index.md @@ -0,0 +1,79 @@ +--- +title: influx stacks +description: > + The `influx stacks` command and its subcommands list and manage InfluxDB stacks + and associated resources. +menu: + influxdb_2_5_ref: + name: influx stacks + parent: influx +weight: 101 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/stack/list/ +influxdb/v2.5/tags: [templates] +cascade: + related: + - /influxdb/v2.5/influxdb-templates/stacks/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.1+, InfluxDB 2.0.1+] +--- + +The `influx stacks` command and its subcommands list and manage InfluxDB stacks +and associated resources. 
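+
+For example, to look up a stack ID before updating or removing a stack, you
+might filter by stack name and return JSON output. The stack name below is a
+placeholder:
+
+```sh
+influx stacks \
+  --stack-name "Example Stack" \
+  --json
+```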
+ +## Usage +``` +influx stacks [flags] +influx stacks [command] +``` + +## Subcommands +| Subcommand | Description | +|:------- |:----------- | +| [init](/influxdb/v2.5/reference/cli/influx/stacks/init/) | Initialize a stack | +| [remove](/influxdb/v2.5/reference/cli/influx/stacks/remove/) | Remove a stack | + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:-----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `stacks` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--stack-id` | Stack IDs to filter by | stringArray | | +| | `--stack-name` | Stack names to filter by | stringArray | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all stacks](#list-all-stacks) +- [Filter stacks by name](#filter-stacks-by-name) +- [Filter stacks by ID](#filter-stacks-by-id) + +##### List all stacks +```sh +influx stacks +``` + +##### Filter stacks by name +```sh +influx stacks \ + --stack-name stack1 \ + --stack-name stack2 +``` + +##### Filter stacks by ID +```sh +influx stacks \ + --stack-id 0Xx0oox00XXoxxoo1 \ + --stack-id 0Xx0oox00XXoxxoo2 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/stacks/init.md b/content/influxdb/v2.5/reference/cli/influx/stacks/init.md new file mode 100644 index 000000000..242a6f66d --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/stacks/init.md @@ -0,0 +1,53 @@ +--- +title: influx stacks init +description: The `influx stacks init` command initializes an InfluxDB stack. +menu: + influxdb_2_5_ref: + name: influx stacks init + parent: influx stacks +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/stack/init/ +influxdb/v2.5/tags: [templates] +--- + +The `influx stacks init` command initializes an InfluxDB stack. + +## Usage +``` +influx stacks init [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:----------------------|:----------------------------------------------------------------------|:---------------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `init` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-d` | `--stack-description` | Stack description | string | | +| `-n` | `--stack-name` | Stack name | string | | +| `-u` | `--template-url` | Template URLs to associate with a stack | list of strings | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Initialize a stack with a name and description + +```sh +influx stacks init -n "Example Stack" -d "InfluxDB stack for monitoring some awesome stuff" +``` + +##### Initialize a stack with a name and URLs to associate with the stack + +```sh +influx stacks init -n "Example Stack" -u https://example.com/template-1.yml +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/stacks/remove.md b/content/influxdb/v2.5/reference/cli/influx/stacks/remove.md new file mode 100644 index 000000000..5ed046c31 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/stacks/remove.md @@ -0,0 +1,48 @@ +--- +title: influx stacks remove +description: The `influx stacks remove` command removes an InfluxDB stack and all associated resources. +menu: + influxdb_2_5_ref: + name: influx stacks remove + parent: influx stacks +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/stack/remove/ +influxdb/v2.5/tags: [templates] +--- + +The `influx stacks remove` command removes an InfluxDB stack and all associated resources. + +## Usage +``` +influx stacks remove [flags] +``` + +#### Command aliases +`remove`, `rm`, `uninstall` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:---------------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--force` | Skip confirmation prompt. | | | +| `-h` | `--help` | Help for the `remove` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--stack-id` | Stack IDs to remove | list of strings | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Remove a stack and all of its associated resources +```sh +influx stacks remove --stack-id 0Xx0oox00XXoxxoo1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/stacks/update.md b/content/influxdb/v2.5/reference/cli/influx/stacks/update.md new file mode 100644 index 000000000..780cc10bc --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/stacks/update.md @@ -0,0 +1,89 @@ +--- +title: influx stacks update +description: The 'influx stacks update' command updates an InfluxDB stack. +menu: + influxdb_2_5_ref: + name: influx stacks update + parent: influx stacks +weight: 201 +influxdb/v2.5/tags: [templates] +updated_in: CLI v2.0.4 +--- + +The `influx stacks update` command updates an InfluxDB stack. + +## Usage +``` +influx stacks update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:----------------------|:----------------------------------------------------------------------|:---------------:|:----------------------| +| | `--addResource` | Associate an existing resource with a stack | string | | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-f` | `--export-file` | Destination for exported template | string | | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--stack-id` | The stack ID to update | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-d` | `--stack-description` | Stack description | string | | +| `-n` | `--stack-name` | Stack name | string | | +| `-u` | `--template-url` | Template URLs to associate with a stack | list of strings | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +{{% warn %}} +#### Export an updated template +To prevent accidental changes, we **strongly recommend** exporting a new template +any time you add additional resources to a stack using the `--addResource` flag +with the `influx stack update` command. +The updated stack will differ from the previous template. +If you apply the outdated template, InfluxDB will revert the updates and remove +the added resources. 
+{{% /warn %}} + +## Examples + +{{< cli/influx-creds-note >}} + +- [Update a stack with a name and description](#update-a-stack-with-a-name-and-description) +- [Update a stack with a name and urls to associate with stack](#update-a-stack-with-a-name-and-urls-to-associate-with-stack) +- [Update a stack with new resources to manage](#update-a-stack-with-new-resources-to-manage) +- [Update a stack with new resources and export the stack as a template](#update-a-stack-with-new-resources-and-export-the-stack-as-a-template) + +##### Update a stack with a name and description +```sh +influx stack update \ + -i ab12cd34ef56 \ + -n "New stack name" \ + -d "New stack description" +``` + +##### Update a stack with a name and URLs to associate with stack +```sh +influx stack update \ + -i ab12cd34ef56 \ + -n "New stack name" \ + --template-url https://example.com/template-1.yml \ + --template-url https://example.com/template-2.yml +``` + +##### Update a stack with new resources to manage +```sh +influx stacks update \ + --stack-id ab12cd34ef56 \ + --addResource=Bucket=12ab34cd56ef \ + --addResource=Dashboard=98zy76xw54vu +``` + +##### Update a stack with new resources and export the stack as a template +```sh +influx stacks update \ + --stack-id ab12cd34ef56 \ + --addResource=Bucket=12ab34cd56ef \ + --export-file /path/to/template-file.yml +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/_index.md b/content/influxdb/v2.5/reference/cli/influx/task/_index.md new file mode 100644 index 000000000..19d70a50c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/_index.md @@ -0,0 +1,40 @@ +--- +title: influx task +description: The `influx task` command and its subcommands manage tasks in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx task + parent: influx +weight: 101 +influxdb/v2.5/tags: [tasks] +cascade: + related: + - /influxdb/v2.5/process-data/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx task` command and its subcommands manage tasks in InfluxDB. + +### Usage +``` +influx task [flags] +influx task [command] +``` + +### Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [create](/influxdb/v2.5/reference/cli/influx/task/create) | Create task | +| [delete](/influxdb/v2.5/reference/cli/influx/task/delete) | Delete task | +| [list](/influxdb/v2.5/reference/cli/influx/task/list) | List tasks | +| [log](/influxdb/v2.5/reference/cli/influx/task/log) | Log related commands | +| [retry-failed](/influxdb/v2.5/reference/cli/influx/task/retry-failed) | Retry failed task runs | +| [run](/influxdb/v2.5/reference/cli/influx/task/run) | Run related commands | +| [update](/influxdb/v2.5/reference/cli/influx/task/update) | Update task | + +### Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `task` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/task/create.md b/content/influxdb/v2.5/reference/cli/influx/task/create.md new file mode 100644 index 000000000..99d551022 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/create.md @@ -0,0 +1,73 @@ +--- +title: influx task create +description: The `influx task create` command creates a task in InfluxDB. 
+menu: + influxdb_2_5_ref: + name: influx task create + parent: influx task +weight: 201 +updated_in: CLI v2.4.0 +--- + +The `influx task create` command creates a task in InfluxDB. + +## Usage +``` +influx task create [task literal] [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-r` | `--cron` | _(InfluxDB Cloud only)_ Cron expression to define when the task runs | string | | +| `-e` | `--every` | _(InfluxDB Cloud only)_ Interval at which the task runs | string | | +| `-f` | `--file` | Path to Flux script file | string | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | _(InfluxDB Cloud only)_ Task name | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--script-id` | _(InfluxDB Cloud only)_ Invokable script ID to execute | string | | +| | `--script-params` | _(InfluxDB Cloud only)_ Invokable script JSON parameters | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Example + +{{< cli/influx-creds-note >}} + +##### Create a task using raw Flux +```sh +export FLUX_TASK=' + option task = { + name: "Example Task", + every: 1d + } + + from(bucket: "example-bucket") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "m") + |> aggregateWindow(every: 1h, fn: mean) + |> to(bucket: "default-ds-1d", org: "my-org") +' + +influx task create $FLUX_TASK +``` + +##### Create a task from a file +```sh +influx task create --file /path/to/example-task.flux +``` + +##### Create a task using an invokable script +```sh +influx task create \ + --name "my task" \ + --every "10h" \ + --script-id 0001234 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/delete.md b/content/influxdb/v2.5/reference/cli/influx/task/delete.md new file mode 100644 index 000000000..297ac4fcc --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/delete.md @@ -0,0 +1,41 @@ +--- +title: influx task delete +description: The `influx task delete` command deletes a task in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx task delete + parent: influx task +weight: 201 +--- + +The `influx task delete` command deletes a task in InfluxDB. 
+ +## Usage +``` +influx task delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-i` | `--id` | ({{< req >}}) Task ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete a task +```sh +influx task delete --id 0Xx0oox00XXoxxoo1 +``` + + diff --git a/content/influxdb/v2.5/reference/cli/influx/task/list.md b/content/influxdb/v2.5/reference/cli/influx/task/list.md new file mode 100644 index 000000000..88d8a5d9c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/list.md @@ -0,0 +1,68 @@ +--- +title: influx task list +description: The `influx task list` command lists and searches for tasks in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx task list + parent: influx task +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/task/find +--- + +The `influx task list` command lists and searches for tasks in InfluxDB. + +## Usage +``` +influx task list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-i` | `--id` | Task ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--limit` | Number of tasks to find (default `100`) | integer | | +| `-o` | `--org` | Task organization name | string | `INFLUX_ORG` | +| | `--org-id` | Task organization ID | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-n` | `--user-id` | Task owner ID | string | | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all tasks](#list-all-tasks) +- [List a specific task](#list-a-specific-task) +- [Limit the number of tasks returned to 20](#limit-the-number-of-tasks-returned-to-20) +- [List all tasks created by a specific user](#list-all-tasks-created-by-a-specific-user) + +##### List all tasks +```sh +influx task list +``` + +##### List a specific task +```sh +influx task list --id 0Xx0oox00XXoxxoo1 +``` + +##### Limit the number of tasks returned to 20 +```sh +influx task list --limit 20 +``` + +##### List all tasks created by a specific user +```sh +influx task list --user-id 0Xx0oox00XXoxxoo1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/log/_index.md b/content/influxdb/v2.5/reference/cli/influx/task/log/_index.md new file mode 100644 index 000000000..b3596c6b7 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/log/_index.md @@ -0,0 +1,30 @@ +--- +title: influx task log +description: > + The `influx task log` and its subcommand, `list`, output log information + related to a task. +menu: + influxdb_2_5_ref: + name: influx task log + parent: influx task +weight: 201 +influxdb/v2.5/tags: [logs] +--- + +The `influx task log` command and its subcommand `list` output log information related to a task. + +## Usage +``` +influx task log [flags] +influx task log [command] +``` + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [list](/influxdb/v2.5/reference/cli/influx/task/log/list) | List logs for task | + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `log` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/task/log/list.md b/content/influxdb/v2.5/reference/cli/influx/task/log/list.md new file mode 100644 index 000000000..b423267da --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/log/list.md @@ -0,0 +1,52 @@ +--- +title: influx task log list +description: The `influx task log list` command outputs log information related to a task. +menu: + influxdb_2_5_ref: + name: influx task log list + parent: influx task log +weight: 301 +aliases: + - /influxdb/v2.5/reference/cli/influx/task/log/list +--- + +The `influx task log list` command outputs log information related to a task. 
+ +## Usage +``` +influx task log list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--run-id` | Run ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--task-id` | ({{< req >}}) Task ID | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List logs from all task executions +```sh +influx task log list --task-id 0Xx0oox00XXoxxoo1 +``` + +##### List logs from a specific task execution +```sh +influx task log list \ + --task-id 0Xx0oox00XXoxxoo1 \ + --run-id ox0Xx0ooxx00XXoo2 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/retry-failed.md b/content/influxdb/v2.5/reference/cli/influx/task/retry-failed.md new file mode 100644 index 000000000..e56bbb4f5 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/retry-failed.md @@ -0,0 +1,98 @@ +--- +title: influx task retry-failed +description: The `influx task retry-failed` command retries failed InfluxDB task runs. +menu: + influxdb_2_5_ref: + name: influx task retry-failed + parent: influx task +weight: 201 +metadata: [influx CLI 2.0.5+, InfluxDB 2.0.5+] +--- + +The `influx task retry-failed` command retries failed InfluxDB task runs. +## Usage +``` +influx task retry-failed [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:--------------------------------------------------------------------------|:----------:|:----------------------| +| | `--after` | Retry task runs that occurred after this time (RFC3339 timestamp) | string | | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--before` | Retry task runs that occurred before this time (RFC3339 timestamp) | string | | +| | `--dry-run` | Print information about task runs that would be retried | | | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-i` | `--id` | Task ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Task organization name | string | `INFLUX_ORG` | +| | `--org-id` | Task organization ID | string | `INFLUX_ORG_ID` | +| | `--run-limit` | Maximum number of failed runs to retry per task (`1-500`, default `100`) | integer | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--task-limit` | Maximum number of tasks to retry failed runs for (`1-500`, default `100`) | integer | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +{{% note %}} +#### Required permissions +Use an [{{% oss-only %}}**Operator** or{{% /oss-only %}} **All-Access** token](/influxdb/v2.5/security/tokens/) to retry failed tasks. +{{% /note %}} + +- [Retry failed task runs for a specific task ID](#retry-failed-task-runs-for-a-specific-task-id) +- [Retry failed task runs that occurred before a specific time](#retry-failed-task-runs-that-occurred-before-a-specific-time) +- [Retry failed task runs that occurred after a specific time](#retry-failed-task-runs-that-occurred-after-a-specific-time) +- [Retry failed task runs that occurred in a specific time range](#retry-failed-task-runs-that-occurred-in-a-specific-time-range) +- [Retry failed runs for a limited number of tasks](#retry-failed-runs-for-a-limited-number-of-tasks) +- [Retry a limited number of failed runs for a task](#retry-a-limited-number-of-failed-runs-for-a-task) +- [Print information about runs that will be retried](#print-information-about-runs-that-will-be-retried) + +##### Retry failed task runs for a specific task ID +```sh +influx task retry-failed \ + --id 0Xx0oox00XXoxxoo1 +``` + +##### Retry failed task runs that occurred before a specific time +```sh +influx task retry-failed \ + --before 2021-01-01T00:00:00Z +``` + +##### Retry failed task runs that occurred after a specific time +```sh +influx task retry-failed \ + --after 2021-01-01T00:00:00Z +``` + +##### Retry failed task runs that occurred in a specific time range +```sh +influx task retry-failed \ + --after 2021-01-01T00:00:00Z \ + --before 2021-01-01T23:59:59Z +``` + +##### Retry failed runs for a limited number of tasks +```sh +influx task retry-failed \ + --task-limit 5 +``` + +##### Retry a limited number of failed runs for a task +```sh +influx task retry-failed \ + --id 0Xx0oox00XXoxxoo1 \ + --run-limit 5 +``` + +##### Print information about runs that will be retried +```sh +influx task retry-failed \ + --dry-run +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/run/_index.md b/content/influxdb/v2.5/reference/cli/influx/task/run/_index.md new file mode 100644 index 000000000..12b2d5fb0 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/run/_index.md @@ -0,0 +1,30 @@ +--- +title: influx task run +description: > + The `influx task run` command and its subcommand, `list` output information + related to runs of a task. +menu: + influxdb_2_5_ref: + name: influx task run + parent: influx task +weight: 201 +--- + +The `influx task run` command and its subcommand `list` output information related to runs of a task. 
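+
+As a quick sketch of how the two subcommands work together (the task and run
+IDs below are placeholders), you might list the runs of a task to find the ID
+of a specific run, and then retry that run:
+
+```sh
+# List recent runs of a task to find a run ID
+influx task run list --task-id 0Xx0oox00XXoxxoo1
+
+# Retry a specific run of the task
+influx task run retry \
+  --task-id 0Xx0oox00XXoxxoo1 \
+  --run-id ox0Xx0ooxx00XXoo2
+```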
+ +## Usage +``` +influx task run [flags] +influx task run [command] +``` + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [list](/influxdb/v2.5/reference/cli/influx/task/run/list) | List runs for a task | +| [retry](/influxdb/v2.5/reference/cli/influx/task/run/retry) | Retry a task | + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `run` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/task/run/list.md b/content/influxdb/v2.5/reference/cli/influx/task/run/list.md new file mode 100644 index 000000000..867584233 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/run/list.md @@ -0,0 +1,66 @@ +--- +title: influx task run list +description: The `influx task run list` command outputs information related to runs of a task. +menu: + influxdb_2_5_ref: + name: influx task run list + parent: influx task run +weight: 301 +aliases: + - /influxdb/v2.5/reference/cli/influx/task/run/find +--- + +The `influx task run list` command outputs information related to runs of a task. + +## Usage +``` +influx task run list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--after` | After-time for filtering | string | | +| | `--before` | Before-time for filtering | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--limit` | Limit the number of results | integer | | +| | `--run-id` | Run ID | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--task-id` | ({{< req >}}) Task ID | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all runs of a task](#list-all-runs-of-a-task) +- [List a specific run of a task](#list-a-specific-run-of-a-task) +- [Limit the number of returned task runs to 20](#limit-the-number-of-returned-task-runs-to-20) + +##### List all runs of a task +```sh +influx task run list --task-id 0Xx0oox00XXoxxoo1 +``` + +##### List a specific run of a task +```sh +influx task run list \ + --task-id 0Xx0oox00XXoxxoo1 \ + --run-id ox0Xx0ooxx00XXoo2 +``` + +##### Limit the number of returned task runs to 20 +```sh +influx task run list \ + --task-id 0Xx0oox00XXoxxoo1 \ + --limit 20 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/run/retry.md b/content/influxdb/v2.5/reference/cli/influx/task/run/retry.md new file mode 100644 index 000000000..c86a4883c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/run/retry.md @@ -0,0 +1,45 @@ +--- +title: influx task run retry +description: The `influx task run retry` command retries to run a task in InfluxDB. 
+menu: + influxdb_2_5_ref: + name: influx task run retry + parent: influx task run +weight: 301 +--- + +The `influx task run retry` command retries to run a task in InfluxDB. + +## Usage +``` +influx task run retry [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `retry` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | string | | +| `-r` | `--run-id` | ({{< req >}}) Run ID | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-i` | `--task-id` | ({{< req >}}) Task ID | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +{{% note %}} +#### Required permissions +Use an [{{% oss-only %}}**Operator** or{{% /oss-only %}} **All-Access** token](/influxdb/v2.5/security/tokens/) to retry tasks. +{{% /note %}} + +##### Retry a task run +```sh +influx task run retry \ + --task-id 0Xx0oox00XXoxxoo1 \ + --run-id ox0Xx0ooxx00XXoo2 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/task/update.md b/content/influxdb/v2.5/reference/cli/influx/task/update.md new file mode 100644 index 000000000..4abd11e90 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/task/update.md @@ -0,0 +1,93 @@ +--- +title: influx task update +description: The `influx task update` command updates information related to tasks in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx task update + parent: influx task +weight: 201 +updated_in: CLI v2.4.0 +--- + +The `influx task update` command updates information related to tasks in InfluxDB. + +## Usage +``` +influx task update [task literal] [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-f` | `--file` | Path to Flux script file | string | | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. 
| string | | +| `-i` | `--id` | ({{< req >}}) Task ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--script-id` | _(InfluxDB Cloud only)_ Invokable script ID to execute | string | | +| | `--script-params` | _(InfluxDB Cloud only)_ Invokable script JSON parameters | string | | +| | `--status` | Update task status (`active` or `inactive`) | string | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Example + +{{< cli/influx-creds-note >}} + +- [Update a task from a Flux string](#update-a-task-from-a-flux-string) +- [Update a task from a Flux file](#update-a-task-from-a-flux-file) +- [Update a task from a script ID](#update-a-task-from-a-script-id) +- [Enable a task](#enable-a-task) +- [Disable a task](#disable-a-task) + +##### Update a task from a Flux string +```sh +export UPDATED_FLUX_TASK=' + option task = { + name: "Example Task", + every: 1d + } + + from(bucket: "example-bucket") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "m") + |> aggregateWindow(every: 1h, fn: mean) + |> to(bucket: "default-ds-1d", org: "my-org") +' + +influx task update \ + --id 0001234 \ + $UPDATED_FLUX_TASK +``` + +##### Update a task from a Flux file +```sh +influx task update \ + --id 0001234 \ + --file /path/to/example-task.flux +``` + +##### Update a task from a script ID +```sh +influx task update \ + --id 0001234 \ + --script-id 0004567 +``` + +##### Enable a task +```sh +influx task update \ + --id 0001234 \ + --status active +``` + +##### Disable a task +```sh +influx task update \ + --id 0001234 \ + --status inactive +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/telegrafs/_index.md b/content/influxdb/v2.5/reference/cli/influx/telegrafs/_index.md new file mode 100644 index 000000000..6474e0be2 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/telegrafs/_index.md @@ -0,0 +1,62 @@ +--- +title: influx telegrafs +description: > + The `influx telegrafs` command lists Telegraf configurations. + Subcommands manage Telegraf configurations. +menu: + influxdb_2_5_ref: + name: influx telegrafs + parent: influx +weight: 101 +influxdb/v2.5/tags: [telegraf] +cascade: + related: + - /influxdb/v2.5/telegraf-configs/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx telegrafs` command lists Telegraf configurations. +Subcommands manage Telegraf configurations. 
+ +## Usage +```sh +influx telegrafs [flags] +influx telegrafs [command] +``` + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [create](/influxdb/v2.5/reference/cli/influx/telegrafs/create) | Create a Telegraf configuration | +| [rm](/influxdb/v2.5/reference/cli/influx/telegrafs/rm) | Remove a Telegraf configuration | +| [update](/influxdb/v2.5/reference/cli/influx/telegrafs/update) | Update a Telegraf configuration | + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `telegrafs` command | | | +| | `--hide-headers` | Hide table headers | | `INFLUX_HIDE_HEADERS` | +| `-i` | `--id` | Telegraf configuration ID to retrieve | string | | +| | `--json` | Output data as JSON | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List all Telegraf configurations +```sh +influx telegrafs +``` + +##### List a Telegraf configuration with the specified ID +```sh +influx telegrafs --id 0Xx0oox00XXoxxoo1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/telegrafs/create.md b/content/influxdb/v2.5/reference/cli/influx/telegrafs/create.md new file mode 100644 index 000000000..3522fb739 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/telegrafs/create.md @@ -0,0 +1,54 @@ +--- + title: influx telegrafs create +description: > + The `influx telegrafs create` command creates a Telegraf configuration in InfluxDB + using a provided Telegraf configuration file. +menu: + influxdb_2_5_ref: + name: influx telegrafs create + parent: influx telegrafs +weight: 201 +--- + +The `influx telegrafs create` command creates a Telegraf configuration in InfluxDB +using a provided Telegraf configuration file. 
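+
+For example, if your active CLI configuration does not set a default
+organization, you can pass the target organization explicitly. The
+organization name and file path below are placeholders:
+
+```sh
+influx telegrafs create \
+  --org example-org \
+  --name "Example configuration name" \
+  --file /path/to/telegraf.conf
+```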
+ +## Usage +```sh +influx telegrafs create [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | Telegraf configuration description | string | | +| `-f` | `--file` | Path to Telegraf configuration | string | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers | | `INFLUX_HIDE_HEADERS` | +| | `--json` | Output data as JSON | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Telegraf configuration name | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Create a Telegraf configuration +```sh +influx telegrafs create \ + --name "Example configuration name" \ + --description "Example Telegraf configuration description" \ + --file /path/to/telegraf.conf +``` + +##### Create a Telegraf configuration via stdin +```sh +cat /path/to/telegraf.conf | influx telegrafs create \ + --name "Example configuration name" \ + --description "Example Telegraf configuration description" \ +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/telegrafs/rm.md b/content/influxdb/v2.5/reference/cli/influx/telegrafs/rm.md new file mode 100644 index 000000000..22a81c79b --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/telegrafs/rm.md @@ -0,0 +1,48 @@ +--- +title: influx telegrafs rm +description: > + The `influx telegrafs rm` command removes Telegraf configurations from InfluxDB. +menu: + influxdb_2_5_ref: + name: influx telegrafs rm + parent: influx telegrafs +weight: 201 +--- + +The `influx telegrafs rm` command removes Telegraf configurations from InfluxDB. 
+ +## Usage +```sh +influx telegrafs rm [flags] +``` + +#### Command aliases +`rm`, `remove` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :-------------------------------------------------------------------- | :---------: | :-------------------- | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `rm` command | | | +| | `--hide-headers` | Hide the table headers | | `INFLUX_HIDE_HEADERS` | +| `-i` | `--id` | Telegraf configuration ID to remove | stringArray | | +| | `--json` | Output data as json | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Remove a Telegraf configuration +```sh +influx telegrafs rm --id ab12cd34ef56 +``` + +##### Remove multiple Telegraf configurations +```sh +influx telegrafs rm \ + --i 0Xx0oox00XXoxxoo1 \ + --i oox0Xx0ox00XXxoo2 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/telegrafs/update.md b/content/influxdb/v2.5/reference/cli/influx/telegrafs/update.md new file mode 100644 index 000000000..0d8c40d75 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/telegrafs/update.md @@ -0,0 +1,56 @@ +--- +title: influx telegrafs update +description: > + The `influx telegrafs update` command updates a Telegraf configuration to match the specified parameters. + If a name or description are not provided, they are set to an empty string. +menu: + influxdb_2_5_ref: + name: influx telegrafs update + parent: influx telegrafs +weight: 201 +--- + +The `influx telegrafs update` command updates a Telegraf configuration to match the specified parameters. +If a name or description are not provided, they are set to an empty string. 
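+
+In other words, passing only `--id` and `--file` clears any existing name and
+description, so re-send both values with every update if you want to keep them.
+The IDs and paths below are placeholders:
+
+```sh
+# Clears the existing name and description
+influx telegrafs update \
+  --id 0Xx0oox00XXoxxoo1 \
+  --file /path/to/telegraf.conf
+
+# Keeps them by re-sending the current values
+influx telegrafs update \
+  --id 0Xx0oox00XXoxxoo1 \
+  --name "Example configuration name" \
+  --description "Example Telegraf configuration description" \
+  --file /path/to/telegraf.conf
+```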
+ +## Usage +```sh +influx telegrafs update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | Telegraf configuration description | string | | +| `-f` | `--file` | Path to Telegraf configuration | string | | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers | | `INFLUX_HIDE_HEADERS` | +| | `--json` | Output data as JSON | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Telegraf configuration name | string | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Update a Telegraf configuration +```sh +influx telegrafs update \ + --id 0Xx0oox00XXoxxoo1 \ + --name "Example configuration name" \ + --description "Example Telegraf configuration description" \ + --file /path/to/telegraf.conf +``` + +##### Update a Telegraf configuration via stdin +```sh +cat /path/to/telegraf.conf | influx telegrafs update \ + --id 0Xx0oox00XXoxxoo1 \ + --name "Example configuration name" \ + --description "Example Telegraf configuration description" \ +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/template/_index.md b/content/influxdb/v2.5/reference/cli/influx/template/_index.md new file mode 100644 index 000000000..f7b5dafa3 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/template/_index.md @@ -0,0 +1,86 @@ +--- +title: influx template +description: The `influx template` command summarizes the specified InfluxDB template. +menu: + influxdb_2_5_ref: + name: influx template + parent: influx +weight: 101 +influxdb/v2.5/tags: [templates] +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/summary/ +cascade: + related: + - /influxdb/v2.5/influxdb-templates/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.1+, InfluxDB 2.0.1+] +--- + +The `influx template` command summarizes the specified InfluxDB template. +The command returns information about all resources included in the template. 
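+
+For example, to save a plain-text summary to a file, you might disable the
+color codes and table borders in the output. The template and output paths
+below are placeholders:
+
+```sh
+influx template \
+  --file /path/to/template.yml \
+  --disable-color \
+  --disable-table-borders > template-summary.txt
+```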
+ +## Usage +``` +influx template [flags] +influx template [command] +``` + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [validate](/influxdb/v2.5/reference/cli/influx/template/validate) | Validate a template | + +## Flags +| Flag | | Description | Input Type | {{< cli/mapped >}} | +|:---- |:--- |:----------- |:---------- |:------------------ | +| | `--disable-color` | Disable color in output | | | +| | `--disable-table-borders` | Disable table borders | | | +| `-e` | `--encoding` | Template encoding (`yaml`,`yml`,`json`, or `jsonnet`) | string | | +| `-f` | `--file` | Path to template file (supports HTTP(S) URLs or file paths) | stringArray | | +| `-h` | `--help` | Help for the `template` command | | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-R` | `--recurse` | Recurse through files in the directory specified in `-f`, `--file` | | | + +## Examples + +{{< cli/influx-creds-note >}} + +**Summarize InfluxDB Templates:** + +- [from a local file](#summarize-an-influxdb-template-from-a-local-file) +- [from multiple files](#summarize-influxdb-templates-from-multiple-files) +- [from a URL](#summarize-an-influxdb-template-from-a-url) +- [from a directory](#summarize-all-influxdb-templates-in-a-directory) +- [using a specific encoding](#specify-the-encoding-of-the-influxdb-template-to-summarize) + +##### Summarize an InfluxDB template from a local file +```sh +influx template --file /path/to/template.yml +``` + +##### Summarize InfluxDB templates from multiple files +```sh +influx template \ + --file /path/to/template1.yml \ + --file /path/to/template2.yml +``` + +##### Summarize an InfluxDB template from a URL +```sh +influx template --file https://example.com/path/to/template.yml +``` + +##### Summarize all InfluxDB templates in a directory +```sh +influx template \ + --file /path/to/template/dir/ \ + --recurse +``` + +##### Specify the encoding of the InfluxDB template to summarize +```sh +influx template \ + --file /path/to/template \ + --encoding json +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/template/validate.md b/content/influxdb/v2.5/reference/cli/influx/template/validate.md new file mode 100644 index 000000000..883c17849 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/template/validate.md @@ -0,0 +1,70 @@ +--- +title: influx template validate +description: > + The `influx template validate` command validates the provided InfluxDB template. +menu: + influxdb_2_5_ref: + parent: influx template +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/pkg/validate/ +--- + +The `influx template validate` command validates the provided InfluxDB template. 
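+
+For example, assuming the `influx apply` command and its `--file` flag, you
+might validate a template and only apply it if validation succeeds. The
+template path below is a placeholder:
+
+```sh
+# Apply the template only if it validates successfully
+influx template validate --file /path/to/template.yml \
+  && influx apply --file /path/to/template.yml
+```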
+ +## Usage +``` +influx template validate [flags] +``` + +## Flags + +| Flag | | Description | Input Type | +|:---- |:--- |:----------- |:---------- | +| `-e` | `--encoding` | Template encoding | string | +| `-f` | `--file` | Path to template file (supports HTTP(S) URLs or file paths) | stringArray | +| `-h` | `--help` | Help for the `validate` command | | +| `-R` | `--recurse` | Recurse through files in the directory specified in `-f`, `--file` | | + +## Examples + +{{< cli/influx-creds-note >}} + +**Validate InfluxDB Templates:** + +- [from a local file](#validate-an-influxdb-template-from-a-local-file) +- [from multiple files](#validate-influxdb-templates-from-multiple-files) +- [from a URL](#validate-an-influxdb-template-from-a-url) +- [from a directory](#validate-all-influxdb-templates-in-a-directory) +- [using a specific encoding](#specify-the-encoding-of-the-influxdb-template-to-validate) + +##### Validate an InfluxDB template from a local file +```sh +influx template validate --file /path/to/template.yml +``` + +##### Validate InfluxDB templates from multiple files +```sh +influx template validate \ + --file /path/to/template1.yml \ + --file /path/to/template2.yml +``` + +##### Validate an InfluxDB template from a URL +```sh +influx template validate --file https://example.com/path/to/template.yml +``` + +##### Validate all InfluxDB templates in a directory +```sh +influx template validate \ + --file /path/to/template/dir/ \ + --recurse +``` + +##### Specify the encoding of the InfluxDB template to validate +```sh +influx template validate \ + --file /path/to/template \ + --encoding json +``` \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influx/transpile/_index.md b/content/influxdb/v2.5/reference/cli/influx/transpile/_index.md new file mode 100644 index 000000000..3d45aa0cc --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/transpile/_index.md @@ -0,0 +1,55 @@ +--- +title: influx transpile +description: > + The `influx transpile` command transpiles an InfluxQL query to Flux source code. +weight: 101 +related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions +metadata: [influx CLI 2.0.0 – 2.0.5] +prepend: + block: warn + content: | + ### Removed in InfluxDB OSS 2.0.5 + The `influx transpile` command was removed in **InfluxDB 2.0.5**. + [Use InfluxQL to query InfluxDB](/influxdb/v2.5/query-data/influxql/). + For information about manually converting InfluxQL queries to Flux, see: + + - [Get started with Flux](/flux/v0.x/get-started/) + - [Query data with Flux](/influxdb/v2.5/query-data/flux/) + - [Migrate continuous queries to Flux tasks](/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/) +--- + +The `influx transpile` command transpiles an InfluxQL query to Flux source code. +The transpiled query assumes the bucket name is `/` +and includes absolute time ranges using the provided `--now` time. + +## Usage +``` +influx transpile [InfluxQL query] [flags] +``` + +{{% note %}} +The InfluxQL query must be valid and contain both a database and measurement. +See the [InfluxQL documentation](/{{< latest "influxdb" "v1" >}}/query_language/) for more information. 
+{{% /note %}} + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `transpile` command | +| | `--now` | RFC3339Nano timestamp to use as `now()` time (default is current UTC time) | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Transpile InfluxQL queries to Flux +```sh +## Transpile an InfluxQL query that specifies the database, +## retention policy, and measurement. +influx transpile 'SELECT example-field FROM db.rp.measurement' + +## Transpile InfluxQL query using default retention policy +influx transpile 'SELECT example-field FROM db..measurement' +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/user/_index.md b/content/influxdb/v2.5/reference/cli/influx/user/_index.md new file mode 100644 index 000000000..99e63979c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/user/_index.md @@ -0,0 +1,39 @@ +--- +title: influx user +description: The `influx user` command and its subcommands manage user information in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx user + parent: influx +weight: 101 +influxdb/v2.5/tags: [users] +cascade: + related: + - /influxdb/v2.5/users/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/user/ +--- + +The `influx user` command and its subcommands manage user information in InfluxDB. + +## Usage +``` +influx user [flags] +influx user [command] +``` + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [create](/influxdb/v2.5/reference/cli/influx/user/create) | Create a user | +| [delete](/influxdb/v2.5/reference/cli/influx/user/delete) | Delete a user | +| [list](/influxdb/v2.5/reference/cli/influx/user/list) | List users | +| [password](/influxdb/v2.5/reference/cli/influx/user/password) | Update a user's password | +| [update](/influxdb/v2.5/reference/cli/influx/user/update) | Update a user | + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `user` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/user/create.md b/content/influxdb/v2.5/reference/cli/influx/user/create.md new file mode 100644 index 000000000..6b5376e70 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/user/create.md @@ -0,0 +1,44 @@ +--- +title: influx user create +description: The `influx user create` command creates a user in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx user create + parent: influx user +weight: 201 +canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/user/create/ +--- + +The `influx user create` command creates a user in InfluxDB. 
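+
+For example, to create a user and associate it with a specific organization,
+you might run something like the following. The username, organization, and
+password below are placeholders:
+
+```sh
+influx user create \
+  --name example-username \
+  --org example-org \
+  --password ExAmPl3PA55W0rD
+```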
+ +## Usage +``` +influx user create [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | ({{< req >}}) Username | string | `INFLUX_NAME` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-p` | `--password` | User password | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Create a user +```sh +influx user create \ + --name example-username \ + --password ExAmPl3PA55W0rD +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/user/delete.md b/content/influxdb/v2.5/reference/cli/influx/user/delete.md new file mode 100644 index 000000000..276404140 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/user/delete.md @@ -0,0 +1,39 @@ +--- +title: influx user delete +description: The `influx user delete` command deletes a specified user. +menu: + influxdb_2_5_ref: + name: influx user delete + parent: influx user +weight: 201 +canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/user/delete/ +--- + +The `influx user delete` command deletes a specified user in InfluxDB. + +## Usage +``` +influx user delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| `-i` | `--id` | ({{< req >}}) User ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete a user +```sh +influx user delete --id 0Xx0oox00XXoxxoo1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/user/list.md b/content/influxdb/v2.5/reference/cli/influx/user/list.md new file mode 100644 index 000000000..7aa7b6ab5 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/user/list.md @@ -0,0 +1,59 @@ +--- +title: influx user list +description: The `influx user list` lists users in InfluxDB. 
+menu: + influxdb_2_5_ref: + name: influx user list + parent: influx user +weight: 201 +aliases: + - /influxdb/v2.5/reference/cli/influx/user/find +canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/user/list/ +--- + +The `influx user list` command lists users in InfluxDB. + +## Usage +``` +influx user list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| `-i` | `--id` | User ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Username | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all users](#list-all-users) +- [List a specific user by username](#list-a-specific-user-by-username) +- [List a specific user by ID](#list-a-specific-user-by-id) + +##### List all users +```sh +influx user list +``` + +##### List a specific user by username +```sh +influx user list --name example-username +``` + +##### List a specific user by ID +```sh +influx user list --id 0Xx0oox00XXoxxoo1 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/user/password.md b/content/influxdb/v2.5/reference/cli/influx/user/password.md new file mode 100644 index 000000000..d6116aa5d --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/user/password.md @@ -0,0 +1,48 @@ +--- +title: influx user password +description: The `influx user password` command updates the password for a user in InfluxDB. +menu: + influxdb_2_5_ref: + name: influx user password + parent: influx user +weight: 201 +related: + - /influxdb/v2.5/users/change-password/ +canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/user/password/ +--- + +The `influx user password` command updates the password for a user in InfluxDB. 
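+
+If an interactive prompt isn't practical (for example, in a provisioning script), the `--password` flag described below can supply the new password directly. A minimal sketch with placeholder credentials:
+
+```sh
+# Set a new password without an interactive prompt
+# (username and password are placeholders; avoid this in shared shell histories)
+influx user password \
+  --name example-username \
+  --password ExAmPl3NeWPA55W0rD
+```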
+ +## Usage +``` +influx user password [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `password` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| `-i` | `--id` | User ID | string | | +| `-n` | `--name` | Username | string | | +| | `--password` | Use `password` flag to send your password instead of typing it in | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Update a user password using a username +```sh +influx user password --name example-username +# Prompts for password +``` + +##### Update a user password using a user ID +```sh +influx user password --id 0Xx0oox00XXoxxoo1 +# Prompts for password +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/user/update.md b/content/influxdb/v2.5/reference/cli/influx/user/update.md new file mode 100644 index 000000000..fd73544da --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/user/update.md @@ -0,0 +1,43 @@ +--- +title: influx user update +description: > + The `influx user update` command updates information related to a user such as their user name. +menu: + influxdb_2_5_ref: + name: influx user update + parent: influx user +weight: 201 +canonical: /{{< latest "influxdb" "v2" >}}/reference/cli/influx/user/update/ +--- + +The `influx user update` command updates information related to a user in InfluxDB. + +## Usage +``` +influx user update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:----------------------------------------------------------------------|:----------:|:----------------------| +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| `-i` | `--id` | ({{< req >}}) User ID | string | | +| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` | +| `-n` | `--name` | Username | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Update a username +```sh +influx user update \ + --id 0Xx0oox00XXoxxoo1 \ + --name new-username +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/_index.md b/content/influxdb/v2.5/reference/cli/influx/v1/_index.md new file mode 100644 index 000000000..404c8cd50 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/_index.md @@ -0,0 +1,37 @@ +--- +title: influx v1 +description: > + The `influx v1` command provides commands for working with the InfluxDB 1.x API in InfluxDB 2.x. 
+menu: + influxdb_2_5_ref: + name: influx v1 + parent: influx +weight: 101 +related: + - /influxdb/v2.5/upgrade/v1-to-v2/ + - /influxdb/v2.5/reference/api/influxdb-1x/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, `influx` CLI—Flag patterns and conventions +cascade: + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +--- + +The `influx v1` command provides commands for working with the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/) in InfluxDB {{< current-version >}}. + +## Usage +``` +influx v1 [flags] +influx v1 [command] +``` + +## Subcommands +| Subcommand | Description | +| :----------------------------------------------------- | :---------------------------------------------------------------- | +| [auth](/influxdb/v2.5/reference/cli/influx/v1/auth/) | Authorization management commands for v1 APIs | +| [dbrp](/influxdb/v2.5/reference/cli/influx/v1/dbrp/) | Database retention policy mapping management commands for v1 APIs | +| [shell](/influxdb/v2.5/reference/cli/influx/v1/shell/) | Start an InfluxQL shell | + +## Flags +| Flag | | Description | +|:-----|:---------|:--------------------------| +| `-h` | `--help` | Help for the `v1` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/_index.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/_index.md new file mode 100644 index 000000000..ed5ee130e --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/_index.md @@ -0,0 +1,53 @@ +--- +title: influx v1 auth +description: > + The `influx v1 auth` subcommands provide authorization management for the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth + parent: influx v1 +weight: 101 +influxdb/v2.5/tags: [authorization] +cascade: + related: + - /influxdb/v2.5/upgrade/v1-to-v2/ + - /influxdb/v2.5/reference/api/influxdb-1x/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, `influx` CLI—Flag patterns and conventions +--- + +The `influx v1 auth` subcommands provide authorization management for the +[InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). + +InfluxDB {{< current-version >}} uses [API tokens](/influxdb/v2.5/security/tokens/) to authorize API requests. +The [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/) lets clients authenticate with InfluxDB {{< current-version >}} using the InfluxDB 1.x convention of username and password. + +{{% note %}} +1.x-compatible authorizations are separate from the credentials used to log +into the InfluxDB user interface. 
+{{% /note %}} + +## Usage +``` +influx v1 auth [flags] +influx v1 auth [command] +``` + +#### Command aliases +`auth`, `authorization` + +## Commands + +| Command | Description | +|:----------------------------------------------------------------------------|:---------------------------------------------| +| [create](/influxdb/v2.5/reference/cli/influx/v1/auth/create/) | Create authorization | +| [delete](/influxdb/v2.5/reference/cli/influx/v1/auth/delete/) | Delete authorization | +| [list](/influxdb/v2.5/reference/cli/influx/v1/auth/list/) | List authorizations | +| [set-active](/influxdb/v2.5/reference/cli/influx/v1/auth/set-active/) | Activate an authorization | +| [set-inactive](/influxdb/v2.5/reference/cli/influx/v1/auth/set-inactive/) | Deactivate an authorization | +| [set-password](/influxdb/v2.5/reference/cli/influx/v1/auth/set-password/) | Set a password for an existing authorization | + +## Flags +| Flag | | Description | +|:-----|:---------|:--------------------------------| +| `-h` | `--help` | Help for the `v1 auth ` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/create.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/create.md new file mode 100644 index 000000000..e9e403d2b --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/create.md @@ -0,0 +1,54 @@ +--- +title: influx v1 auth create +description: > + The `influx v1 auth create` command creates an authorization in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth create + parent: influx v1 auth +weight: 101 +influxdb/v2.5/tags: [authorization] +updated_in: CLI v2.0.3 +--- + +The `influx v1 auth create` command creates a legacy authorization with the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
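+
+After the authorization is created and given a password, 1.x-style clients can authenticate with that username and password. A minimal sketch using `curl` against the compatibility `/query` endpoint (host, credentials, database, and measurement are placeholders):
+
+```sh
+# Query through the 1.x compatibility API with v1 username/password credentials
+# (host, credentials, database, and measurement are placeholders)
+curl --get "http://localhost:8086/query" \
+  --user "example-user:ExAmPl3PA55W0rD" \
+  --data-urlencode "db=example-db" \
+  --data-urlencode "q=SELECT * FROM example-measurement"
+```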
+ +## Usage +``` +influx v1 auth create [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :--------------------------------------------------------------------------------------------------------- | :---------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-d` | `--description` | Token description | string | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--no-password` | Don't prompt for a password (to use the token, you must set a password with `influx v1 auth set-password`) | | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--password` | Password to set on the authorization | | | +| | `--read-bucket` | Bucket ID to assign read permissions to | stringArray | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| | `--username` | ({{< req >}}) Token username | string | | +| | `--write-bucket` | Bucket ID to assign write permissions to | stringArray | | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Create a v1 authorization with read and write permissions +```sh +# Create an authorization with read and write access to bucket 00xX00o0X001 +# but only read access to bucket 00xX00o0X002 +influx v1 auth create \ + --read-bucket 00xX00o0X001 \ + --read-bucket 00xX00o0X002 \ + --write-bucket 00xX00o0X001 \ + --username example-user +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/delete.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/delete.md new file mode 100644 index 000000000..ae3d17212 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/delete.md @@ -0,0 +1,41 @@ +--- +title: influx v1 auth delete +description: > + The `influx v1 auth delete` command deletes an authorization in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth delete + parent: influx v1 auth +weight: 101 +influxdb/v2.5/tags: [authorization] +--- + +The `influx v1 auth delete` command deletes an authorization in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
+ +## Usage +``` +influx v1 auth delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| `-i` | `--id` | ({{< req >}}) Authorization ID | string | | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| | `--username` | Authorization username | string | `INFLUX_USERNAME` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete a v1 authorization +```sh +influx v1 auth delete --id 00xX00o0X001 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/list.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/list.md new file mode 100644 index 000000000..1362c85e1 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/list.md @@ -0,0 +1,68 @@ +--- +title: influx v1 auth list +description: > + The `influx v1 auth list` command lists and searches authorizations in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth list + parent: influx v1 auth +weight: 101 +influxdb/v2.5/tags: [authorization] +--- + +The `influx v1 auth list` command lists and searches authorizations in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
+ +## Usage +``` +influx v1 auth list [flags] +``` + +#### Command aliases +`list`, `ls`, `find` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| `-i` | `--id` | Authorization ID | string | | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-u` | `--user` | InfluxDB user | string | | +| | `--user-id` | InfluxDB user ID | string | | +| | `--username` | Authorization username | string | `INFLUX_USERNAME` | + +## Examples + +{{< cli/influx-creds-note >}} + +- [List all v1 authorizations](#list-all-v1-authorizations) +- [List v1 authorizations associated with a username](#list-v1-authorizations-associated-with-a-username) +- [List v1 authorizations associated with a user ID](#list-v1-authorizations-associated-with-a-user-id) +- [List a specific v1 authorization by ID](#list-a-specific-v1-authorization-by-id) + +##### List all v1 authorizations +```sh +influx v1 auth list +``` + +##### List v1 authorizations associated with a username +```sh +influx v1 auth list --user example-username +``` + +##### List v1 authorizations associated with a user ID +```sh +influx v1 auth list --user-id 00xX00o0X001 +``` + +##### List a specific v1 authorization by ID +```sh +influx v1 auth list --id 00xX00o0X001 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-active.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-active.md new file mode 100644 index 000000000..8138fe812 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-active.md @@ -0,0 +1,42 @@ +--- +title: influx v1 auth set-active +description: > + The `influx v1 auth set-active` command activates an authorization in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth set-active + parent: influx v1 auth +weight: 101 +influxdb/v2.5/tags: [authorization] +--- + +The `influx v1 auth set-active` command activates an authorization in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). +Only active authorizations grant access to InfluxDB. 
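+
+Activating an authorization requires its ID. An illustrative workflow (the username and ID below are placeholders) that looks up the authorization and then reactivates it:
+
+```sh
+# Look up the authorization ID, then mark the authorization active
+# (username and ID are placeholders)
+influx v1 auth list --username example-user
+influx v1 auth set-active --id 00xX00o0X001
+```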
+ +## Usage +``` +influx v1 auth set-active [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +|:-----|:------------------|:-------------------------------------------------------------------------|:----------:|:------------------------| +| `-c` | `--active-config` | Config name to use for command | string | `$INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `$INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `set-active` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `$INFLUX_HIDE_HEADERS` | +| | `--host` | ({{< req >}}) HTTP address of InfluxDB | string | `$INFLUX_HOST` | +| `-i` | `--id` | Authorization ID | string | | +| | `--json` | Output data as JSON (default: `false`) | | `$INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `$INFLUX_TOKEN` | +| | `--username` | Authorization username | string | `$INFLUX_USERNAME` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Activate a v1 authorization +```sh +influx v1 auth set-active --id 00xX00o0X001 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-inactive.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-inactive.md new file mode 100644 index 000000000..b8860b2b5 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-inactive.md @@ -0,0 +1,42 @@ +--- +title: influx v1 auth set-inactive +description: > + The `influx v1 auth set-inactive` command deactivates an authorization in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth set-inactive + parent: influx v1 auth +weight: 101 +influxdb/v2.5/tags: [authorization] +--- + +The `influx v1 auth set-inactive` command deactivates an authorization in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). +Inactive authorizations **do not** grant access to InfluxDB. 
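+
+Deactivating an authorization is a reversible alternative to deleting it. A minimal sketch (the ID below is a placeholder):
+
+```sh
+# Temporarily revoke access without deleting the authorization
+influx v1 auth set-inactive --id 00xX00o0X001
+
+# Restore access later
+influx v1 auth set-active --id 00xX00o0X001
+```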
+ +## Usage +``` +influx v1 auth set-inactive [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `set-inactive` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| `-i` | `--id` | ({{< req >}}) Authorization ID | string | | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| | `--username` | Authorization username | string | `INFLUX_USERNAME` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Deactivate a v1 authorization +```sh +influx v1 auth set-inactive --id 00xX00o0X001 +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-password.md b/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-password.md new file mode 100644 index 000000000..a4cb7789b --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/auth/set-password.md @@ -0,0 +1,43 @@ +--- +title: influx v1 auth set-password +description: > + The `influx v1 auth set-password` command sets a password for an existing authorization in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 auth set-password + parent: influx v1 auth +weight: 101 +influxdb/v2.5/tags: [authorization] +updated_in: CLI v2.0.3 +--- + +The `influx v1 auth set-password` command sets a password for an existing authorization in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
+ +## Usage +``` +influx v1 auth set-password [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `set-password` command | | | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| `-i` | `--id` | Authorization ID | string | | +| | `--password` | Password to set on the authorization | string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| | `--username` | Authorization username | string | `INFLUX_USERNAME` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Set a password for a v1 authorization +```sh +influx v1 auth set-password \ + --id 00xX00o0X001 \ + --password ExAmPl3PA55W0rD +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/_index.md b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/_index.md new file mode 100644 index 000000000..a0716864c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/_index.md @@ -0,0 +1,42 @@ +--- +title: influx v1 dbrp +description: > + The `influx v1 dbrp` subcommands manage database and retention policy mappings (DBRP) + for the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). +menu: + influxdb_2_5_ref: + name: influx v1 dbrp + parent: influx v1 +weight: 101 +influxdb/v2.5/tags: [DBRP] +cascade: + related: + - /influxdb/v2.5/upgrade/v1-to-v2/ + - /influxdb/v2.5/reference/api/influxdb-1x/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, `influx` CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.2+, InfluxDB 2.0.1+] +--- + +The `influx v1 dbrp` subcommands manage database and retention policy mappings (DBRP) +for the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). + +## Usage +``` +influx v1 dbrp [flags] +influx v1 dbrp [command] +``` + +## Commands + +| Command | Description | +|:------------------------------------------------------------- |:--------------------- | +| [create](/influxdb/v2.5/reference/cli/influx/v1/dbrp/create/) | Create a DBRP mapping | +| [delete](/influxdb/v2.5/reference/cli/influx/v1/dbrp/delete/) | Delete a DBRP mapping | +| [list](/influxdb/v2.5/reference/cli/influx/v1/dbrp/list/) | List DBRP mappings | +| [update](/influxdb/v2.5/reference/cli/influx/v1/dbrp/update/) | Update a DBRP mapping | + +## Flags +| Flag | | Description | +|:-----|:---------|:--------------------------------| +| `-h` | `--help` | Help for the `v1 dbrp ` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/create.md b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/create.md new file mode 100644 index 000000000..ee08e55d6 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/create.md @@ -0,0 +1,50 @@ +--- +title: influx v1 dbrp create +description: > + The `influx v1 dbrp create` command creates a DBRP mapping in the InfluxDB 1.x compatibility API. 
+menu: + influxdb_2_5_ref: + name: influx v1 dbrp create + parent: influx v1 dbrp +weight: 101 +influxdb/v2.5/tags: [DBRP] +--- + +The `influx v1 dbrp create` command creates a DBRP mapping with the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). + +## Usage +``` +influx v1 dbrp create [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--bucket-id` | Bucket ID to map to | | | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--db` | InfluxDB v1 database to map from | | | +| | `--default` | Set DBRP mapping's retention policy as default | | | +| `-h` | `--help` | Help for the `create` command | | | +| | `--hide-headers` | Hide table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--rp` | InfluxDB v1 retention policy to map from | | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + + +## Examples + +{{< cli/influx-creds-note >}} + +##### Create a DBRP mapping +```sh +influx v1 dbrp create \ + --bucket-id 12ab34cd56ef \ + --db example-db \ + --rp example-rp \ + --default +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/delete.md b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/delete.md new file mode 100644 index 000000000..1104696f5 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/delete.md @@ -0,0 +1,42 @@ +--- +title: influx v1 dbrp delete +description: > + The `influx v1 dbrp delete` command deletes a DBRP mapping in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 dbrp delete + parent: influx v1 dbrp +weight: 101 +influxdb/v2.5/tags: [DBRP] +--- + +The `influx v1 dbrp delete` command deletes a DBRP mapping in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
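+
+Deleting a mapping requires its DBRP ID, which you can look up with `influx v1 dbrp list`. An illustrative workflow (the database name and ID below are placeholders):
+
+```sh
+# Find the ID of the mapping for a database, then delete that mapping
+# (database name and ID are placeholders)
+influx v1 dbrp list --db example-db
+influx v1 dbrp delete --id 12ab34cd56ef
+```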
+ +## Usage +``` +influx v1 dbrp delete [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| ---- | ----------------- | ------------------------------------------------------------------------ | ---------- | ---------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| `-h` | `--help` | Help for the `delete` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| | `--id` | ({{< req >}}) DBRP ID | string | | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Delete a DBRP mapping +```sh +influx v1 dbrp delete --id 12ab34cd56ef +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/list.md b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/list.md new file mode 100644 index 000000000..198346187 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/list.md @@ -0,0 +1,64 @@ +--- +title: influx v1 dbrp list +description: > + The `influx v1 dbrp list` command lists and searches DBRP mappings in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 dbrp list + parent: influx v1 dbrp +weight: 101 +influxdb/v2.5/tags: [dbrp] +--- + +The `influx v1 dbrp list` command lists and searches DBRP mappings in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
+ +## Usage +``` +influx v1 dbrp list [flags] +``` + +## Flags + +| Flag | | Description | Input type | {{< cli/mapped >}} | +| ---- | ----------------- | ------------------------------------------------------------------------ | ---------- | ---------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--bucket-id` | Bucket ID | | | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--db` | Filter DBRP mappings by database | | | +| | `--default` | Limit results to default mapping | | | +| `-h` | `--help` | Help for the `list` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| | `--id` | Limit results to a specified mapping | string | | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--rp` | Filter DBRP mappings by InfluxDB v1 retention policy | string | `INFLUX_ORG` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### List all DBRP mappings in your organization +```sh +influx v1 dbrp list +``` + +##### List DBRP mappings for specific buckets +```sh +influx v1 dbrp list \ + --bucket-id 12ab34cd56ef78 \ + --bucket-id 09zy87xw65vu43 +``` + +##### List DBRP mappings with a specific database +```sh +influx v1 dbrp list --db example-db +``` + +##### List DBRP mappings with a specific retention policy +```sh +influx v1 dbrp list --rp example-rp +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/update.md b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/update.md new file mode 100644 index 000000000..587b8a7e3 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/dbrp/update.md @@ -0,0 +1,53 @@ +--- +title: influx v1 dbrp update +description: > + The `influx v1 dbrp update` command updates a DBRP mapping in the InfluxDB 1.x compatibility API. +menu: + influxdb_2_5_ref: + name: influx v1 dbrp update + parent: influx v1 dbrp +weight: 101 +influxdb/v2.5/tags: [DBRP] +--- + +The `influx v1 dbrp update` command updates a DBRP mapping in the [InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). 
+ +## Usage +``` +influx v1 dbrp update [flags] +``` + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--default` | Set DBRP mapping's retention policy as default | | | +| `-h` | `--help` | Help for the `update` command | | | +| | `--hide-headers` | Hide the table headers (default: `false`) | | `INFLUX_HIDE_HEADERS` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| | `--id` | ({{< req >}}) DBRP ID | string | | +| | `--json` | Output data as JSON (default: `false`) | | `INFLUX_OUTPUT_JSON` | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-r` | `--rp` | InfluxDB v1 retention policy to map from | | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Set a DBRP mapping as default +```sh +influx v1 dbrp update \ + --id 12ab34cd56ef78 \ + --default +``` + +##### Update the retention policy of a DBRP mapping +```sh +influx v1 dbrp update \ + --id 12ab34cd56ef78 \ + --rp new-rp-name +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/v1/shell.md b/content/influxdb/v2.5/reference/cli/influx/v1/shell.md new file mode 100644 index 000000000..65dfaa306 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/v1/shell.md @@ -0,0 +1,59 @@ +--- +title: influx v1 shell +description: > + The `influx v1 shell` subcommand starts an InfluxQL shell (REPL). +menu: + influxdb_2_5_ref: + name: influx v1 shell + parent: influx v1 +weight: 101 +influxdb/v2.5/tags: [InfluxQL] +related: + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + - /influxdb/v2.5/query-data/influxql/ + - /influxdb/v2.5/tools/influxql-shell/ +metadata: [influx CLI 2.4.0+, InfluxDB 2.0.1+] +--- + +The `influx v1 shell` subcommand starts an InfluxQL shell (REPL). + +{{% note %}} +#### Set up database and retention policy (DBRP) mapping + +InfluxQL queries require a database and retention policy to query data. +In InfluxDB {{% current-version %}}, databases and retention policies have been +combined and replaced with [buckets](/influxdb/v2.5/reference/glossary/#bucket). +To use the InfluxQL to query an InfluxDB {{% current-version %}} bucket, first +map your DBRP combinations to an appropriate bucket. For more information, see +[Query data with InfluxQL](/influxdb/v2.5/query-data/influxql/). 
+ +{{% /note %}} + +## Usage + +```sh +influx v1 shell [flags] +``` + +## Flags + +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :---------------- | :----------------------------------------------------------------------- | :--------: | :--------------------- | +| `-c` | `--active-config` | Config name to use for command | string | `INFLUX_ACTIVE_CONFIG` | +| | `--configs-path` | Path to the influx CLI configurations (default: `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--host` | HTTP address of InfluxDB | string | `INFLUX_HOST` | +| | `--http-debug` | Inspect communication with InfluxDB servers. | | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | + +## Examples + +{{< cli/influx-creds-note >}} + +##### Start an InfluxQL shell +```sh +influx v1 shell +``` \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influx/version/_index.md b/content/influxdb/v2.5/reference/cli/influx/version/_index.md new file mode 100644 index 000000000..6d339edac --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/version/_index.md @@ -0,0 +1,25 @@ +--- +title: influx version +description: The `influx version` command outputs the current version of the influx command line interface (CLI). +influxdb/v2.5/tags: [influx, cli] +menu: + influxdb_2_5_ref: + parent: influx +weight: 202 +metadata: [influx CLI 2.0.0+] +--- + +The `influx version` command outputs the current version of the `influx` +command line interface (CLI). + +## Usage + +``` +influx version [flags] +``` + +## Flags + +| Flag | | Description | +| :--- | :------- | :----------------------------- | +| `-h` | `--help` | Help for the `version` command | diff --git a/content/influxdb/v2.5/reference/cli/influx/write/_index.md b/content/influxdb/v2.5/reference/cli/influx/write/_index.md new file mode 100644 index 000000000..b9c42814d --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/write/_index.md @@ -0,0 +1,287 @@ +--- +title: influx write +description: > + The `influx write` command writes data to InfluxDB via stdin or from a specified file. + Write data using line protocol, annotated CSV, or extended annotated CSV. +menu: + influxdb_2_5_ref: + name: influx write + parent: influx +weight: 101 +influxdb/v2.5/tags: [write] +cascade: + related: + - /influxdb/v2.5/write-data/ + - /influxdb/v2.5/write-data/developer-tools/csv/ + - /influxdb/v2.5/reference/syntax/line-protocol/ + - /influxdb/v2.5/reference/syntax/annotated-csv/ + - /influxdb/v2.5/reference/syntax/annotated-csv/extended/ + - /influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials + - /influxdb/v2.5/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions + metadata: [influx CLI 2.0.0+, InfluxDB 2.0.0+] +updated_in: CLI v2.0.5 +--- + +The `influx write` command writes data to InfluxDB via stdin or from a specified file. +Write data using [line protocol](/influxdb/v2.5/reference/syntax/line-protocol), +[annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv), or +[extended annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/extended/). 
+If you write CSV data, CSV annotations determine how the data translates into line protocol. + + +## Usage +``` +influx write [flags] +influx write [command] +``` + +{{% note %}} +### Required data +To write data to InfluxDB, you must provide the following for each row: + +- **measurement** +- **field** +- **value** + +#### Line protocol +In **line protocol**, the [structure of the line data](/influxdb/v2.5/reference/syntax/line-protocol/#elements-of-line-protocol) +determines the measurement, field, and value. + +#### Annotated CSV +In **annotated CSV**, measurements, fields, and values are represented by the +`_measurement`, `_field`, and `_value` columns. +Their types are determined by CSV annotations. +To successfully write annotated CSV to InfluxDB, include all +[annotation rows](/influxdb/v2.5/reference/syntax/annotated-csv/#annotations). + +#### Extended annotated CSV +In **extended annotated CSV**, measurements, fields, and values and their types are determined by +[CSV annotations](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#csv-annotations). +{{% /note %}} + +## Subcommands +| Subcommand | Description | +|:---------- |:----------- | +| [dryrun](/influxdb/v2.5/reference/cli/influx/write/dryrun) | Write to stdout instead of InfluxDB | + +## Flags +| Flag | | Description | Input type | {{< cli/mapped >}} | +| :--- | :------------------ | :------------------------------------------------------------------------------------------- | :---------: | :-------------------- | +| `-c` | `--active-config` | CLI configuration to use for command | string | | +| `-b` | `--bucket` | Bucket name (mutually exclusive with `--bucket-id`) | string | `INFLUX_BUCKET_NAME` | +| | `--bucket-id` | Bucket ID (mutually exclusive with `--bucket`) | string | `INFLUX_BUCKET_ID` | +| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` | +| | `--compression` | Input compression (`none` or `gzip`, default is `none` unless input file ends with `.gz`.) | string | | +| | `--debug` | Output errors to stderr | | | +| | `--encoding` | Character encoding of input (default `UTF-8`) | string | | +| | `--error-file` | Path to a file used for recording rejected row errors | string | | +| `-f` | `--file` | File to import | stringArray | | +| | `--format` | Input format (`lp` or `csv`, default `lp`) | string | | +| | `--header` | Prepend header line to CSV input data | string | | +| `-h` | `--help` | Help for the `write` command | | | +| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` | +| | `--max-line-length` | Maximum number of bytes that can be read for a single line (default `16000000`) | integer | | +| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` | +| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` | +| `-p` | `--precision` | [Precision](/influxdb/v2.5/write-data/#timestamp-precision) of the timestamps (default `ns`) | string | `INFLUX_PRECISION` | +| | `--rate-limit` | Throttle write rate (examples: `5 MB / 5 min` or `1MB/s`). 
| string | | +| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` | +| | `--skipHeader` | Skip first *n* rows of input data | integer | | +| | `--skipRowOnError` | Output CSV errors to stderr, but continue processing | | | +| `-t` | `--token` | API token | string | `INFLUX_TOKEN` | +| `-u` | `--url` | URL to import data from | stringArray | | + +## Examples + +{{< cli/influx-creds-note >}} + +###### Write line protocol + +- [via stdin](#write-line-protocol-via-stdin) +- [from a file](#write-line-protocol-from-a-file) +- [from multiple files](#write-line-protocol-from-multiple-files) +- [from a URL](#write-line-protocol-from-a-url) +- [from multiple URLs](#write-line-protocol-from-multiple-urls) +- [from multiple sources](#write-line-protocol-from-multiple-sources) +- [from a compressed file](#write-line-protocol-from-a-compressed-file) + +###### Write CSV data + +- [annotated CSV via stdin](#write-annotated-csv-data-via-stdin) +- [extended annotated CSV via stdin](#write-extended-annotated-csv-data-via-stdin) +- [from a file](#write-annotated-csv-data-from-a-file) +- [from multiple files](#write-annotated-csv-data-from-multiple-files) +- [from a URL](#write-annotated-csv-data-from-a-url) +- [from multiple URLs](#write-annotated-csv-data-from-multiple-urls) +- [from multiple sources](#write-annotated-csv-data-from-multiple-sources) +- [and prepend annotation headers](#prepend-csv-data-with-annotation-headers) +- [from a compressed file](#write-annotated-csv-data-from-a-compressed-file) +- [using rate limiting](#write-annotated-csv-data-using-rate-limiting) + +### Line protocol + +##### Write line protocol via stdin +```sh +influx write --bucket example-bucket " +m,host=host1 field1=1.2,field2=5i 1640995200000000000 +m,host=host2 field1=2.4,field2=3i 1640995200000000000 +" +``` + +##### Write line protocol from a file +```sh +influx write \ + --bucket example-bucket \ + --file path/to/line-protocol.txt +``` + +##### Write line protocol from multiple files +```sh +influx write \ + --bucket example-bucket \ + --file path/to/line-protocol-1.txt \ + --file path/to/line-protocol-2.txt +``` + +##### Write line protocol from a URL +```sh +influx write \ + --bucket example-bucket \ + --url https://example.com/line-protocol.txt +``` + +##### Write line protocol from multiple URLs +```sh +influx write \ + --bucket example-bucket \ + --url https://example.com/line-protocol-1.txt \ + --url https://example.com/line-protocol-2.txt +``` + +##### Write line protocol from multiple sources +```sh +influx write \ + --bucket example-bucket \ + --file path/to/line-protocol-1.txt \ + --url https://example.com/line-protocol-2.txt +``` + +##### Write line protocol from a compressed file +```sh +# The influx CLI assumes files with the .gz extension use gzip compression +influx write \ + --bucket example-bucket \ + --file path/to/line-protocol.txt.gz + +# Specify gzip compression for gzipped files without the .gz extension +influx write \ + --bucket example-bucket \ + --file path/to/line-protocol.txt.comp \ + --compression gzip +``` + +--- + +### CSV + +##### Write annotated CSV data via stdin +```sh +influx write \ + --bucket example-bucket \ + --format csv \ + "#group,false,false,false,false,true,true +#datatype,string,long,dateTime:RFC3339,double,string,string +#default,_result,,,,, +,result,table,_time,_value,_field,_measurement +,,0,2020-12-18T18:16:11Z,72.7,temp,sensorData +,,0,2020-12-18T18:16:21Z,73.8,temp,sensorData +,,0,2020-12-18T18:16:31Z,72.7,temp,sensorData 
+,,0,2020-12-18T18:16:41Z,72.8,temp,sensorData +,,0,2020-12-18T18:16:51Z,73.1,temp,sensorData +" +``` + +##### Write extended annotated CSV data via stdin +```sh +influx write \ + --bucket example-bucket \ + --format csv \ + "#constant measurement,sensorData +#datatype,datetime:RFC3339,double +time,temperature +2020-12-18T18:16:11Z,72.7 +2020-12-18T18:16:21Z,73.8 +2020-12-18T18:16:31Z,72.7 +2020-12-18T18:16:41Z,72.8 +2020-12-18T18:16:51Z,73.1 +" +``` + +##### Write annotated CSV data from a file +```sh +influx write \ + --bucket example-bucket \ + --file path/to/data.csv +``` + +##### Write annotated CSV data from multiple files +```sh +influx write \ + --bucket example-bucket \ + --file path/to/data-1.csv \ + --file path/to/data-2.csv +``` + +##### Write annotated CSV data from a URL +```sh +influx write \ + --bucket example-bucket \ + --url https://example.com/data.csv +``` + +##### Write annotated CSV data from multiple URLs +```sh +influx write \ + --bucket example-bucket \ + --url https://example.com/data-1.csv \ + --url https://example.com/data-2.csv +``` + +##### Write annotated CSV data from multiple sources +```sh +influx write \ + --bucket example-bucket \ + --file path/to/data-1.csv \ + --url https://example.com/data-2.csv +``` + +##### Prepend CSV data with annotation headers +```sh +influx write \ + --bucket example-bucket \ + --header "#constant measurement,birds" \ + --header "#datatype dateTime:2006-01-02,long,tag" \ + --file path/to/data.csv +``` + +##### Write annotated CSV data from a compressed file +```sh +# The influx CLI assumes files with the .gz extension use gzip compression +influx write \ + --bucket example-bucket \ + --file path/to/data.csv.gz + +# Specify gzip compression for gzipped files without the .gz extension +influx write \ + --bucket example-bucket \ + --file path/to/data.csv.comp \ + --compression gzip +``` + +##### Write annotated CSV data using rate limiting +```sh +influx write \ + --bucket example-bucket \ + --file path/to/data.csv \ + --rate-limit 5 MB / 5 min +``` diff --git a/content/influxdb/v2.5/reference/cli/influx/write/dryrun.md b/content/influxdb/v2.5/reference/cli/influx/write/dryrun.md new file mode 100644 index 000000000..af4f8905f --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influx/write/dryrun.md @@ -0,0 +1,192 @@ +--- +title: influx write dryrun +description: > + The `influx write dryrun` command prints write output to stdout instead of writing + to InfluxDB. Use this command to test writing data. +menu: + influxdb_2_5_ref: + name: influx write dryrun + parent: influx write +weight: 101 +influxdb/v2.5/tags: [write] +related: + - /influxdb/v2.5/write-data/ + - /influxdb/v2.5/write-data/developer-tools/csv/ + - /influxdb/v2.5/reference/syntax/line-protocol/ + - /influxdb/v2.5/reference/syntax/annotated-csv/ + - /influxdb/v2.5/reference/syntax/annotated-csv/extended/ +--- + +The `influx write dryrun` command prints write output to stdout instead of writing +to InfluxDB. Use this command to test writing data. + +Supports [line protocol](/influxdb/v2.5/reference/syntax/line-protocol), +[annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv), and +[extended annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/extended). +Output is always **line protocol**. 
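+
+For example, a dry run of annotated CSV input prints the line protocol a real write would send, without writing anything (the file path and sample output below are illustrative):
+
+```sh
+# Preview how annotated CSV translates to line protocol without writing it
+# (file path and output values are illustrative)
+influx write dryrun \
+  --bucket example-bucket \
+  --format csv \
+  --file path/to/data.csv
+
+# Example output (one line of line protocol per input row):
+# sensorData temp=72.7 1608315371000000000
+```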
+
+## Usage
+```
+influx write dryrun [flags]
+```
+
+## Flags
+| Flag | | Description | Input type | {{< cli/mapped >}} |
+|:-----|:--------------------|:--------------------------------------------------------------------------------|:-----------:|:----------------------|
+| `-c` | `--active-config` | CLI configuration to use for command | string | |
+| `-b` | `--bucket` | Bucket name (mutually exclusive with `--bucket-id`) | string | `INFLUX_BUCKET_NAME` |
+| | `--bucket-id` | Bucket ID (mutually exclusive with `--bucket`) | string | `INFLUX_BUCKET_ID` |
+| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
+| | `--debug` | Output errors to stderr | | |
+| | `--encoding` | Character encoding of input (default `UTF-8`) | string | |
+| | `--error-file` | Path to a file used for recording rejected row errors | string | |
+| `-f` | `--file` | File to import | stringArray | |
+| | `--format` | Input format (`lp` or `csv`, default `lp`) | string | |
+| | `--header` | Prepend header line to CSV input data | string | |
+| `-h` | `--help` | Help for the `dryrun` command | | |
+| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
+| | `--max-line-length` | Maximum number of bytes that can be read for a single line (default `16000000`) | integer | |
+| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
+| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
+| `-p` | `--precision` | Precision of the timestamps (default `ns`) | string | `INFLUX_PRECISION` |
+| | `--rate-limit` | Throttle write rate (examples: `5 MB / 5 min` or `1MB/s`). | string | |
+| | `--skip-verify` | Skip TLS certificate verification | | `INFLUX_SKIP_VERIFY` |
+| | `--skipHeader` | Skip first *n* rows of input data | integer | |
+| | `--skipRowOnError` | Output CSV errors to stderr, but continue processing | | |
+| `-t` | `--token` | API token | string | `INFLUX_TOKEN` |
+| `-u` | `--url` | URL to import data from | stringArray | |
+
+## Examples
+
+{{< cli/influx-creds-note >}}
+
+- [Dry run writing line protocol](#line-protocol)
+  - [via stdin](#dry-run-writing-line-protocol-via-stdin)
+  - [from a file](#dry-run-writing-line-protocol-from-a-file)
+  - [from multiple files](#dry-run-writing-line-protocol-from-multiple-files)
+  - [from a URL](#dry-run-writing-line-protocol-from-a-url)
+  - [from multiple URLs](#dry-run-writing-line-protocol-from-multiple-urls)
+  - [from multiple sources](#dry-run-writing-line-protocol-from-multiple-sources)
+
+- [Dry run writing CSV data](#csv)
+  - [via stdin](#dry-run-writing-annotated-csv-data-via-stdin)
+  - [from a file](#dry-run-writing-annotated-csv-data-from-a-file)
+  - [from multiple files](#dry-run-writing-annotated-csv-data-from-multiple-files)
+  - [from a URL](#dry-run-writing-annotated-csv-data-from-a-url)
+  - [from multiple URLs](#dry-run-writing-annotated-csv-data-from-multiple-urls)
+  - [from multiple sources](#dry-run-writing-annotated-csv-data-from-multiple-sources)
+  - [and prepending annotation headers](#dry-run-prepending-csv-data-with-annotation-headers)
+
+
+### Line protocol
+
+##### Dry run writing line protocol via stdin
+```sh
+influx write dryrun --bucket example-bucket "
+m,host=host1 field1=1.2
+m,host=host2 field1=2.4
+m,host=host1 field2=5i
+m,host=host2 field2=3i
+"
+```
+
+##### Dry run writing line protocol from a file
+```sh
+influx write dryrun \
+  --bucket
example-bucket \ + --file path/to/line-protocol.txt +``` + +##### Dry run writing line protocol from multiple files +```sh +influx write dryrun \ + --bucket example-bucket \ + --file path/to/line-protocol-1.txt \ + --file path/to/line-protocol-2.txt +``` + +##### Dry run writing line protocol from a URL +```sh +influx write dryrun \ + --bucket example-bucket \ + --url https://example.com/line-protocol.txt +``` + +##### Dry run writing line protocol from multiple URLs +```sh +influx write dryrun \ + --bucket example-bucket \ + --url https://example.com/line-protocol-1.txt \ + --url https://example.com/line-protocol-2.txt +``` + +##### Dry run writing line protocol from multiple sources +```sh +influx write dryrun \ + --bucket example-bucket \ + --file path/to/line-protocol-1.txt \ + --url https://example.com/line-protocol-2.txt +``` + +--- + +### CSV + +##### Dry run writing annotated CSV data via stdin +```sh +influx write dryrun \ + --bucket example-bucket \ + --format csv \ + "#datatype measurement,tag,tag,field,field,ignored,time +m,cpu,host,time_steal,usage_user,nothing,time +cpu,cpu1,host1,0,2.7,a,1482669077000000000 +cpu,cpu1,host2,0,2.2,b,1482669087000000000 +" +``` + +##### Dry run writing annotated CSV data from a file +```sh +influx write dryrun \ + --bucket example-bucket \ + --file path/to/data.csv +``` + +##### Dry run writing annotated CSV data from multiple files +```sh +influx write dryrun \ + --bucket example-bucket \ + --file path/to/data-1.csv \ + --file path/to/data-2.csv +``` + +##### Dry run writing annotated CSV data from a URL +```sh +influx write dryrun \ + --bucket example-bucket \ + --url https://example.com/data.csv +``` + +##### Dry run writing annotated CSV data from multiple URLs +```sh +influx write dryrun \ + --bucket example-bucket \ + --url https://example.com/data-1.csv \ + --url https://example.com/data-2.csv +``` + +##### Dry run writing annotated CSV data from multiple sources +```sh +influx write dryrun \ + --bucket example-bucket \ + --file path/to/data-1.csv \ + --url https://example.com/data-2.csv +``` + +##### Dry run prepending CSV data with annotation headers +```sh +influx write dryrun \ + --bucket example-bucket \ + --header "#constant measurement,birds" \ + --header "#datatype dataTime:2006-01-02,long,tag" \ + --file path/to/data.csv +``` diff --git a/content/influxdb/v2.5/reference/cli/influxd/_index.md b/content/influxdb/v2.5/reference/cli/influxd/_index.md new file mode 100644 index 000000000..f054d93a2 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/_index.md @@ -0,0 +1,45 @@ +--- +title: influxd - InfluxDB service +description: The `influxd` service starts and runs all the processes necessary for InfluxDB to function. +influxdb/v2.5/tags: [influxd, cli] +menu: + influxdb_2_5_ref: + name: influxd + parent: Command line tools +weight: 102 +related: + - /influxdb/v2.5/reference/config-options/ +products: [oss] +--- + +The `influxd` daemon starts and runs all the processes necessary for InfluxDB to function. + +## Usage + +``` +influxd [flags] +influxd [command] +``` + +{{% note %}} +For information about other available InfluxDB configuration methods, see +[InfluxDB configuration options](/influxdb/v2.5/reference/config-options/). 
+{{% /note %}} + +## Commands + +| Command | Description | +| :----------------------------------------------------------------- | :----------------------------------------------------------- | +| [downgrade](/influxdb/v2.5/reference/cli/influxd/downgrade/) | Downgrade metadata schema to match an older release | +| help | Output help information for `influxd` | +| [inspect](/influxdb/v2.5/reference/cli/influxd/inspect/) | Inspect on-disk database data | +| [print-config](/influxdb/v2.5/reference/cli/influxd/print-config/) | (**Deprecated**) Print full influxd configuration for the current environment | +| [recovery](/influxdb/v2.5/reference/cli/influxd/recovery/) | Recover operator access to InfluxDB | +| [run](/influxdb/v2.5/reference/cli/influxd/run/) | Start the influxd server _**(default)**_ | +| [upgrade](/influxdb/v2.5/reference/cli/influxd/upgrade/) | Upgrade a 1.x version of InfluxDB to {{< current-version >}} | +| [version](/influxdb/v2.5/reference/cli/influxd/version/) | Output the current version of InfluxDB | + +## Flags + + +{{< cli/influxd-flags >}} diff --git a/content/influxdb/v2.5/reference/cli/influxd/downgrade.md b/content/influxdb/v2.5/reference/cli/influxd/downgrade.md new file mode 100644 index 000000000..be02d8113 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/downgrade.md @@ -0,0 +1,48 @@ +--- +title: influxd downgrade +description: > + The `influxd downgrade` command downgrades the metadata schema used by + `influxd` to match the schema of an older release. +menu: + influxdb_2_5_ref: + parent: influxd +weight: 201 +related: + - /influxdb/v2.5/upgrade/downgrade/ +--- + +Use the `influxd downgrade` command to downgrade the metadata schema used by +`influxd` to match the metadata schema of a older release. + +InfluxDB does not guarantee backwards-compatibility with earlier releases. +Attempting to start an older `influxd` binary with a BoltDB or SQLite file that +has been migrated to a newer schema will result in a startup error. +This command downgrades metadata schemas to match the schemas of an older release +and allows the older `influxd` binary to boot successfully. + +{{% note %}} +Run this command **prior** to downgrading the `influxd` binary. +{{% /note %}} + +## Usage + +```sh +influxd downgrade [flags] +``` + +## Flags + +| Flag | | Description | Input type | +| :--- | :-------------- | :------------------------------------------------------------------------------- | :--------: | +| | `--bolt-path` | Path to BoltDB database (default is `~/.influxdbv2/influxd.bolt`) | string | +| `-h` | `--help` | Help for `downgrade` | | +| | `--log-level` | Log level (`debug`, `info` _(default_), `warn` or `error`) | string | +| | `--sqlite-path` | Path to SQLite database (default is `influxd.sqlite` in the bolt path directory) | string | + + +## Examples + +##### Downgrade to InfluxDB 2.0 +```sh +influxd downgrade 2.0 +``` \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/_index.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/_index.md new file mode 100644 index 000000000..4ee2b5c4c --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/_index.md @@ -0,0 +1,38 @@ +--- +title: influxd inspect +description: The `influxd inspect` commands and subcommands inspecting on-disk InfluxDB time series data. +influxdb/v2.5/tags: [inspect] +menu: + influxdb_2_5_ref: + parent: influxd +weight: 201 +--- + +The `influxd inspect` commands and subcommands inspecting on-disk InfluxDB time series data. 
+ +## Usage +```sh +influxd inspect [subcommand] +``` + +## Subcommands +| Subcommand | Description | +| :----------------------------------------------------------------------------------- | :-------------------------------------- | +| [build-tsi](/influxdb/v2.5/reference/cli/influxd/inspect/build-tsi/) | Rebuild the TSI index and series file | +| [delete-tsm](/influxdb/v2.5/reference/cli/influxd/inspect/delete-tsm/) | Delete a measurement from a TSM file | +| [dump-tsi](/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsi/) | Output low level TSI information | +| [dump-tsm](/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsm/) | Output low level TSM information | +| [dump-wal](/influxdb/v2.5/reference/cli/influxd/inspect/dump-wal/) | Output TSM data from WAL files | +| [export-index](/influxdb/v2.5/reference/cli/influxd/inspect/export-index/) | Export TSI index data | +| [export-lp](/influxdb/v2.5/reference/cli/influxd/inspect/export-lp/) | Export TSM data to line protocol | +| [report-tsi](/influxdb/v2.5/reference/cli/influxd/inspect/report-tsi/) | Report the cardinality of TSI files | +| [report-tsm](/influxdb/v2.5/reference/cli/influxd/inspect/report-tsm/) | Report information about TSM files | +| [verify-seriesfile](/influxdb/v2.5/reference/cli/influxd/inspect/verify-seriesfile/) | Verify the integrity of series files | +| [verify-tombstone](/influxdb/v2.5/reference/cli/influxd/inspect/verify-tombstone/) | Verify the integrity of tombstone files | +| [verify-tsm](/influxdb/v2.5/reference/cli/influxd/inspect/verify-tsm/) | Verify the integrity of TSM files | +| [verify-wal](/influxdb/v2.5/reference/cli/influxd/inspect/verify-wal/) | Verify the integrity of WAL files | + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| `-h` | `--help` | Help for the `inspect` command | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/build-tsi.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/build-tsi.md new file mode 100644 index 000000000..1e0e52fe5 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/build-tsi.md @@ -0,0 +1,59 @@ +--- +title: influxd inspect build-tsi +description: > + The `influxd inspect build-tsi` command rebuilds the TSI index and, if necessary, + the series file. +influxdb/v2.5/tags: [tsi] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect build-tsi` command rebuilds the TSI index and, if necessary, +the series file. + +## Usage +```sh +influxd inspect build-tsi [flags] +``` + +InfluxDB builds the index by reading all Time-Structured Merge tree (TSM) indexes +and Write Ahead Log (WAL) entries in the TSM and WAL data directories. +If the series file directory is missing, it rebuilds the series file. +If the TSI index directory already exists, the command will fail. + +### Adjust performance +Use the following options to adjust the performance of the indexing process: + +##### --max-log-file-size +`--max-log-file-size` determines how much of an index to store in memory before +compacting it into memory-mappable index files. +If you find the memory requirements of your TSI index are too high, consider +decreasing this setting. + +##### --max-cache-size +`--max-cache-size` defines the maximum cache size. +The indexing process replays WAL files into a `tsm1.Cache`. +If the maximum cache size is too low, the indexing process will fail. +Increase `--max-cache-size` to account for the size of your WAL files. 
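+
+For example, a minimal sketch of a rebuild that raises the cache limit (the paths and the cache-size value below are illustrative only; adjust them for your system):
+
+```sh
+influxd inspect build-tsi \
+  --data-path ~/.influxdbv2/engine/data \
+  --wal-path ~/.influxdbv2/engine/wal \
+  --max-cache-size 2147483648
+```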
+ +##### --batch-size +`--batch-size` defines the size of the batches written into the index. +Altering the batch size can improve performance but may result in significantly +higher memory usage. + +## Flags +| Flag | | Description | Input Type | +| :--- | :---------------------- | :---------------------------------------------------------------------------------------------- | :--------: | +| | `--batch-size` | Size of the batches to write to the index. Defaults to `10000`. [See above](#--batch-size). | integer | +| | `--bucket-id` | Bucket ID (required if `--shard-id` is present). | string | +| | `--compact-series-file` | Compact existing series file. Does not rebuilt index. | | +| | `--concurrency` | Number of workers to dedicate to shard index building. Defaults to `GOMAXPROCS` (8 by default). | integer | +| | `--data-path` | Path to the TSM data directory. Default is `~/.influxdbv2/engine/data`. | string | +| `-h` | `--help` | Help for the `build-tsi` command. | | +| | `--max-cache-size` | Maximum cache size. Defaults to `1073741824`. [See above](#--max-cache-size). | uinteger | +| | `--max-log-file-size` | Maximum log file size. Defaults to `1048576`. [See above](#--max-log-file-size) . | integer | +| | `--shard-id` | Shard ID (requires a `--bucket-id`). | string | +| `-v` | `--verbose` | Enable verbose output. | | +| | `--wal-path` | Path to the WAL data directory. Defaults to `~/.influxdbv2/engine/wal`. | string | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/delete-tsm.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/delete-tsm.md new file mode 100644 index 000000000..ccf7248dc --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/delete-tsm.md @@ -0,0 +1,38 @@ +--- +title: influxd inspect delete-tsm +description: > + The `influxd inspect delete-tsm` command deletes a measurement from a raw TSM file. +influxdb/v2.5/tags: [tsm] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect delete-tsm` command deletes a measurement from a raw TSM file. + +## Usage +```sh +influxd inspect delete-tsm [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :--- | :-------------- | :---------------------------------------------------- | :--------: | +| `-h` | `--help` | Help for `delete-tsm` | | +| | `--measurement` | Name of the measurement to delete | string | +| | `--sanitize` | Remove all keys with non-printable unicode characters | | +| `-v` | `--verbose` | Enable verbose logging | | + +## Examples + +##### Delete a measurement from a TSM file +```sh +influxd inspect delete-tsm \ + --measurement example-measurement +``` + +##### Remove non-printable unicode characters from all TSM files +```sh +influxd inspect delete-tsm --sanitize +``` diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsi.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsi.md new file mode 100644 index 000000000..a868b484a --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsi.md @@ -0,0 +1,32 @@ +--- +title: influxd inspect dump-tsi +description: > + The `influxd inspect dump-tsi` command outputs low-level information about `tsi1` files. +influxdb/v2.5/tags: [tsi, inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect dump-tsi` command outputs low-level information about +Time Series Index (`tsi1`) files. 
+
+## Usage
+```sh
+influxd inspect dump-tsi [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :--------------------- | :------------------------------------- | :--------: |
+| `-h` | `--help` | Help for the `dump-tsi` command. | |
+| | `--measurement-filter` | Regular expression measurement filter. | string |
+| | `--measurements` | Show raw measurement data. | |
+| | `--series` | Show raw series data. | |
+| | `--series-file` | Path to series file. | string |
+| | `--tag-key-filter` | Regular expression tag key filter. | string |
+| | `--tag-keys` | Show raw tag key data. | |
+| | `--tag-value-filter` | Regular expression tag value filter. | string |
+| | `--tag-value-series` | Show raw series data for each value. | |
+| | `--tag-values` | Show raw tag value data. | |
diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsm.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsm.md
new file mode 100644
index 000000000..a9e8c9b71
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-tsm.md
@@ -0,0 +1,30 @@
+---
+title: influxd inspect dump-tsm
+description: >
+  The `influxd inspect dump-tsm` command outputs low-level information about `tsm1` files.
+influxdb/v2.5/tags: [tsm, inspect]
+menu:
+  influxdb_2_5_ref:
+    parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect dump-tsm` command outputs low-level information about
+Time-Structured Merge Tree (`tsm1`) files.
+
+## Usage
+```sh
+influxd inspect dump-tsm [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :------------- | :--------------------------------------------- | :--------: |
+| | `--all` | Output all TSM data {{< req " \*" >}} | |
+| | `--blocks` | Output raw block data. | |
+| | `--file-path` | Path to TSM file. | string |
+| | `--filter-key` | Only display data matching this key substring. | string |
+| `-h` | `--help` | Help for `dump-tsm`. | |
+| | `--index` | Dump raw index data. | |
+
+_{{< req "\*" >}} Using the `--all` flag may print a significant amount of information._
diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-wal.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-wal.md
new file mode 100644
index 000000000..3a0724583
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/dump-wal.md
@@ -0,0 +1,43 @@
+---
+title: influxd inspect dump-wal
+description: >
+  The `influxd inspect dump-wal` command outputs data from WAL files for debugging purposes.
+influxdb/v2.5/tags: [wal, inspect]
+menu:
+  influxdb_2_5_ref:
+    parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect dump-wal` command outputs data from Write Ahead Log (WAL)
+files for debugging purposes.
+Given at least one WAL file path as an argument, the tool parses and prints
+out the entries in each file.
+
+## Usage
+```sh
+influxd inspect dump-wal [flags]
+```
+
+## Output details
+The `--find-duplicates` flag determines the `influxd inspect dump-wal` output.
+ +**Without `--find-duplicates`**, the command outputs the following for each file: + +- The file name +- For each entry in a file: + - The type of the entry (`[write]` or `[delete-bucket-range]`) + - The formatted entry contents + +**With `--find-duplicates`**, the command outputs the following for each file): + +- The file name +- A list of keys with timestamps in the wrong order + + + +## Flags +| Flag | | Description | +|:---- |:--- |:----------- | +| | `--find-duplicates` | Ignore dumping entries; only report keys in the WAL that are out of order. | +| `-h` | `--help` | Help for the `dump-wal` command. | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/export-index.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/export-index.md new file mode 100644 index 000000000..8167b5170 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/export-index.md @@ -0,0 +1,26 @@ +--- +title: influxd inspect export-index +description: > + The `influxd inspect export-index` command exports all series in a TSI index to + SQL format for inspection and debugging. +influxdb/v2.5/tags: [inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect export-index` command exports all series in a TSI index to +SQL format for inspection and debugging. + +## Usage +```sh +influxd inspect export-index [flags] +``` + +## Flags +| Flag | | Description | Input type | +| :--- | :-------------- | :---------------------------------------------------------------------------------------------------------------------------- | :--------: | +| `-h` | `--help` | Help for the `export-index` command. | | +| | `--index-path` | Path to the [`index` directory](/influxdb/v2.5/reference/internals/file-system-layout/#tsm-directories-and-files-layout). | string | +| | `--series-path` | Path to the [`_series` directory]((/influxdb/v2.5/reference/internals/file-system-layout/#tsm-directories-and-files-layout)). | string | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/export-lp.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/export-lp.md new file mode 100644 index 000000000..d0be3b48d --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/export-lp.md @@ -0,0 +1,133 @@ +--- +title: influxd inspect export-lp +description: > + The `influxd inspect export-lp` command exports all time series data in a bucket + as line protocol. +influxdb/v2.5/tags: [inspect, export] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect export-lp` command exports all time-structured merge tree (TSM) +data in a bucket to [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/). + +## Usage +```sh +influxd inspect export-lp [flags] +``` + +## Flags +| Flag | | Description | Input type | +|:---- |:--- |:----------- |:----------:| +| | `--bucket-id` | ({{< req >}}) Bucket ID | string | +| | `--compress` | Compress output with GZIP | | +| | `--end` | End time to export (RFC3339 format) | string | +| | `--engine-path` | ({{< req >}}) Path to persistent InfluxDB engine files | string | +| `-h` | `--help` | Help for the `export-lp` command. 
| | +| | `--log-level` | Log-level (`debug`, `info` _(default)_, or `error`) | string | +| | `--measurement` | Measurement name(s) to export | strings | +| | `--output-path` | ({{< req >}}) Output path (file path or stdout _(`-`)_) | string | +| | `--start` | Start time to export (RFC3339 format) | string | + +## Examples + +- [Export all data in a bucket as line protocol](#export-all-data-in-a-bucket-as-line-protocol) +- [Export data in measurements as line protocol](#export-data-in-measurements-as-line-protocol) +- [Export data in specified time range as line protocol](#export-data-in-specified-time-range-as-line-protocol) + +##### Export all data in a bucket as line protocol +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[To a file](#) +[To stdout](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --output-path path/to/export.lp +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --output-path - +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +##### Export data in measurements as line protocol + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[To a file](#) +[To stdout](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +# Export a single measurement +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --measurement example-measurement \ + --output-path path/to/export.lp + +# Export multiple measurements +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --measurement example-measurement-1 example-measurement-2 \ + --output-path path/to/export.lp +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +# Export a single measurement +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --measurement example-measurement \ + --output-path - + +# Export multiple measurements +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --measurement example-measurement-1 example-measurement-2 \ + --output-path - +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +##### Export data in specified time range as line protocol +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[To a file](#) +[To stdout](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --start 2021-01-01T00:00:00Z \ + --end 2021-01-31T23:59:59Z \ + --output-path path/to/export.lp +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sh +influxd inspect export-lp \ + --bucket-id 12ab34cd56ef \ + --engine-path ~/.influxdbv2/engine \ + --start 2021-01-01T00:00:00Z \ + --end 2021-01-31T23:59:59Z \ + --output-path - +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/report-tsi.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/report-tsi.md new file mode 100644 index 000000000..46746b435 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/report-tsi.md @@ -0,0 +1,29 @@ +--- +title: influxd inspect report-tsi +description: > + The `influxd inspect report-tsi` command analyzes Time Series Index (TSI) files + in a storage directory and reports the cardinality of data stored in the files. 
+influxdb/v2.5/tags: [tsi, cardinality, inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect report-tsi` command analyzes Time Series Index (TSI) within +a specified bucket and reports the cardinality of data stored in the bucket +segmented by shard and measurement. + +## Usage +```sh +influxd inspect report-tsi [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :---- | :--------------- | :------------------------------------------------------------------------------------------- | :--------: | +| `-b` | `--bucket-id` | ({{< req >}}) Process data for specified bucket ID. | string | +| `-c ` | `--concurrency` | Number of workers to run concurrently (default is the number of available processing units). | integer | +| | `--data-path` | Path to data directory (default `~/.influxdbv2/engine/data`). | string | +| `-h` | `--help` | View Help for the `report-tsi` command. | | +| `-t` | `-top` | Limit results to the top n. | integer | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/report-tsm.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/report-tsm.md new file mode 100644 index 000000000..dbf5c2c61 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/report-tsm.md @@ -0,0 +1,55 @@ +--- +title: influxd inspect report-tsm +description: > + The `influxd inspect report-tsm` command analyzes Time-Structured Merge Tree (TSM) + files within a storage engine directory and reports the cardinality within the files + and the time range the data covers. +influxdb/v2.5/tags: [tsm, cardinality, inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect report-tsm` command analyzes Time-Structured Merge Tree (TSM) +files within a storage engine directory and reports the cardinality within the files +and the time range the data covers. + +This command only reports on the index within each TSM file. +It does not read any block data. +To reduce heap requirements, by default `report-tsm` estimates the overall +cardinality in the file set by using the HLL++ algorithm. +Determine exact cardinalities by using the `--exact` flag. + +## Usage +```sh +influxd inspect report-tsm [flags] +``` + +## Output details +`influxd inspect report-tsm` outputs the following for each TSM file: + +- The full file name. +- The series cardinality within the file. +- The number of series first encountered within the file. +- The minimum and maximum timestamp associated with TSM data in the file. +- The time to load the TSM index and apply any tombstones. + +The summary section then outputs the total time range and series cardinality for +the file set. Depending on the `--detailed` flag, series cardinality is segmented +in the following ways: + +- Series cardinality for each organization. +- Series cardinality for each bucket. +- Series cardinality for each measurement. +- Number of field keys for each measurement. +- Number of tag values for each tag key. + +## Flags +| Flag | | Description | Input Type | +| :--- | :------------ | :----------------------------------------------------------------------------------------------- | :--------: | +| | `--data-path` | Path to data directory (defaults to `~/.influxdbv2/engine/data`). | string | +| | `--detailed` | Emit series cardinality segmented by measurements, tag keys, and fields. _**May take a while**_. | | +| | `--exact` | Calculate an exact cardinality count. _**May use significant memory**_. 
| | +| `-h` | `--help` | Help for the `report-tsm` command. | | +| | `--pattern` | Only process TSM files containing pattern. | string | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-seriesfile.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-seriesfile.md new file mode 100644 index 000000000..74f85e747 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-seriesfile.md @@ -0,0 +1,27 @@ +--- +title: influxd inspect verify-seriesfile +description: > + The `influxd inspect verify-seriesfile` command verifies the integrity of series files. +influxdb/v2.5/tags: [inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect verify-seriesfile` command verifies the integrity of series files. + +## Usage +```sh +influxd inspect verify-seriesfile [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :--- | :-------------- | :-------------------------------------------------------------------------------------------- | :--------: | +| | `--bucket-id` | Verify series files from a specific bucket. | string | +| `-c` | `--concurrency` | Number of workers to run concurrently (defaults to the number of available processing units). | integer | +| | `--data-path` | Path to data directory (defaults to `~/.influxdbv2/engine/data`). | string | +| `-h` | `--help` | Help for the `verify-seriesfile` command. | | +| | `--series-path` | Path to series file (overrides `--data-path` and `--bucket-id`). | string | +| `-v` | `--verbose` | Enable verbose output. | | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-tombstone.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-tombstone.md new file mode 100644 index 000000000..50129ae09 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-tombstone.md @@ -0,0 +1,25 @@ +--- +title: influxd inspect verify-tombstone +description: > + The `influxd inspect verify-tombstone` command verifies the integrity of tombstone files. +influxdb/v2.5/tags: [inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect verify-tombstone` command verifies the integrity of tombstone files. + +## Usage +```sh +influxd inspect verify-tombstone [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :--- | :-------------- | :------------------------------------------------------------------- | :--------: | +| | `--engine-path` | Path to find tombstone files (defaults to `~/.influxdbv2/engine`). | string | +| `-h` | `--help` | Help for `verify-tombstone`. | | +| `-v` | `--verbose` | Verbose output (emit periodic progress). | | +| | `--vv` | Very verbose output (emit every tombstone entry key and time range). | | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-tsm.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-tsm.md new file mode 100644 index 000000000..7ee71c696 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-tsm.md @@ -0,0 +1,26 @@ +--- +title: influxd inspect verify-tsm +description: > + The `influxd inspect verify-tsm` command analyzes a set of TSM files for inconsistencies + between the TSM index and the blocks. +influxdb/v2.5/tags: [tsm, inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect verify-tsm` command verifies the integrity of TSM files. 
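+
+For example, a minimal sketch that checks the TSM files under the default engine path with verbose logging (see the flags described below):
+
+```sh
+influxd inspect verify-tsm \
+  --engine-path ~/.influxdbv2/engine \
+  --verbose
+```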
+ +## Usage +```sh +influxd inspect verify-tsm [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :--- | :-------------- | :---------------------------------------------------------------------- | :--------: | +| | `--check-utf8` | Verify series keys are valid UTF-8 (skips block checksum verification). | | +| | `--engine-path` | Storage engine directory path (default is `~/.influxdbv2/engine`). | string | +| `-h` | `--help` | Help for `verify-tsm`. | | +| `-v` | `--verbose` | Enable verbose logging. | | diff --git a/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-wal.md b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-wal.md new file mode 100644 index 000000000..bd6a8e110 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/inspect/verify-wal.md @@ -0,0 +1,40 @@ +--- +title: influxd inspect verify-wal +description: > + The `influxd inspect verify-wal` command analyzes the Write-Ahead Log (WAL) + to check if there are any corrupt files. +influxdb/v2.5/tags: [wal, inspect] +menu: + influxdb_2_5_ref: + parent: influxd inspect +weight: 301 +--- + +The `influxd inspect verify-wal` command analyzes the Write-Ahead Log (WAL) +to check if there are any corrupt files. +If it finds corrupt files, the command returns the names of those files. +It also returns the total number of entries in each scanned WAL file. + +## Usage +```sh +influxd inspect verify-wal [flags] +``` + +## Output details +`influxd inspect verify-wal` outputs the following for each file: + +- The file name. +- The first position of any identified corruption or "clean" if no corruption is found. + +After the verification is complete, it returns a summary with: + +- The number of WAL files scanned. +- The number of WAL entries scanned. +- A list of files found to be corrupt. + +## Flags +| Flag | | Description | Input Type | +| :--- | :----------- | :------------------------------------------------------------ | :--------: | +| `-h` | `--help` | Help for the `verify-wal` command. | | +| `-v` | `--verbose` | Enable verbose logging. | | +| | `--wal-path` | Path to WAL directory (default is `~/.influxdbv2/engine/wal`) | string | diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/_index.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/_index.md new file mode 100644 index 000000000..fb5a4df77 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/_index.md @@ -0,0 +1,33 @@ +--- +title: influxd recovery +description: > + The `influxd recovery` command and subcommands provide tools for recovering + operator access to InfluxDB by directly modifying authorization, organization, + and user data stored on disk. +menu: + influxdb_2_5_ref: + parent: influxd +weight: 201 +--- + +The `influxd recovery` command and subcommands provide tools for recovering +operator access to InfluxDB by directly modifying authorization, organization, +and user data stored on disk. 
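+
+For example, if you are locked out of InfluxDB, a common first step is to stop `influxd` and list the users stored in the default BoltDB file (a sketch; see the subcommand pages below for details and additional flags):
+
+```sh
+influxd recovery user list \
+  --bolt-path ~/.influxdbv2/influxd.bolt
+```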
+ +## Usage +```sh +influxd recovery [flags] +influxd recovery [command] +``` + +## Subcommands +| Subcommand | Description | +| :---------------------------------------------------------- | :--------------------------------------------- | +| [auth](/influxdb/v2.5/reference/cli/influxd/recovery/auth/) | Manage on-disk authorization data for recovery | +| [org](/influxdb/v2.5/reference/cli/influxd/recovery/org/) | Manage on-disk organization data for recovery | +| [user](/influxdb/v2.5/reference/cli/influxd/recovery/user/) | Manage on-disk user data for recovery | + +## Flags +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Help for `recovery` | \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/_index.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/_index.md new file mode 100644 index 000000000..25cdb8056 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/_index.md @@ -0,0 +1,30 @@ +--- +title: influxd recovery auth +description: > + The `influxd recovery auth` command and subcommands manage on-disk authorization + data for recovery purposes. +menu: + influxdb_2_5_ref: + parent: influxd recovery +weight: 301 +--- + +The `influxd recovery auth` command and subcommands manage on-disk authorization +data for recovery purposes. + +## Usage +```sh +influxd recovery auth [flags] +influxd recovery auth [command] +``` + +## Subcommands +| Subcommand | Description | +| :------------------------------------------------------------------------------------- | :----------------------------------- | +| [create-operator](/influxdb/v2.5/reference/cli/influxd/recovery/auth/create-operator/) | Create new operator token for a user | +| [list](/influxdb/v2.5/reference/cli/influxd/recovery/auth/list/) | List authorizations | + +## Flags +| Flag | | Description | +| :---- | :------- | :-------------- | +| `-h ` | `--help` | Help for `auth` | \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/create-operator.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/create-operator.md new file mode 100644 index 000000000..690feb4cd --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/create-operator.md @@ -0,0 +1,41 @@ +--- +title: influxd recovery auth create-operator +description: > + The `influxd recovery auth create-operator` command creates new + [Operator token](/influxdb/v2.5/security/tokens/#operator-token) directly on disk + for a specified user. +menu: + influxdb_2_5_ref: + parent: influxd recovery auth +weight: 401 +--- + +The `influxd recovery auth create-operator` command creates a new +[Operator token](/influxdb/v2.5/security/tokens/#operator-token) directly on disk +for a specified user. + +{{% note %}} +This command can only be executed when the InfluxDB server (`influxd`) is not running. 
+{{% /note %}}
+
+## Usage
+```sh
+influxd recovery auth create-operator [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :------------ | :------------------------------------------------------------- | :--------: |
+| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string |
+| `-h` | `--help` | Help for `create-operator` | |
+| | `--org` | ({{< req >}}) Organization name | string |
+| | `--username` | ({{< req >}}) Username to assign the operator token to | string |
+
+## Examples
+
+##### Generate a new operator token
+```sh
+influxd recovery auth create-operator \
+  --org example-org \
+  --username example-user
+```
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/list.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/list.md
new file mode 100644
index 000000000..b08626240
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/auth/list.md
@@ -0,0 +1,28 @@
+---
+title: influxd recovery auth list
+description: >
+  The `influxd recovery auth list` command lists authorizations and data
+  associated with each authorization stored on disk.
+menu:
+  influxdb_2_5_ref:
+    parent: influxd recovery auth
+weight: 401
+---
+
+The `influxd recovery auth list` command lists authorizations stored on disk and
+outputs data associated with each authorization.
+
+{{% note %}}
+This command can only be executed when the InfluxDB server (`influxd`) is not running.
+{{% /note %}}
+
+## Usage
+```sh
+influxd recovery auth list [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :------------ | :------------------------------------------------------------- | :--------: |
+| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string |
+| `-h` | `--help` | Help for `list` | |
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/org/_index.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/org/_index.md
new file mode 100644
index 000000000..190f9f242
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/org/_index.md
@@ -0,0 +1,30 @@
+---
+title: influxd recovery org
+description: >
+  The `influxd recovery org` command and subcommands manage on-disk organization
+  data for recovery purposes.
+menu:
+  influxdb_2_5_ref:
+    parent: influxd recovery
+weight: 301
+---
+
+The `influxd recovery org` command and subcommands manage on-disk organization
+data for recovery purposes.
+
+## Usage
+```sh
+influxd recovery org [flags]
+influxd recovery org [command]
+```
+
+## Subcommands
+| Subcommand | Description |
+| :------------------------------------------------------------------ | :----------------- |
+| [create](/influxdb/v2.5/reference/cli/influxd/recovery/org/create/) | Create new org |
+| [list](/influxdb/v2.5/reference/cli/influxd/recovery/org/list/) | List organizations |
+
+## Flags
+| Flag | | Description |
+| :--- | :------- | :------------- |
+| `-h` | `--help` | Help for `org` |
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/org/create.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/org/create.md
new file mode 100644
index 000000000..50273be3e
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/org/create.md
@@ -0,0 +1,36 @@
+---
+title: influxd recovery org create
+description: >
+  The `influxd recovery org create` command creates a new organization directly
+  on disk for recovery purposes.
+menu: + influxdb_2_5_ref: + parent: influxd recovery org +weight: 401 +--- + +The `influxd recovery org create` command creates a new organization directly +on disk for recovery purposes. + +{{% note %}} +This command can only be executed when the InfluxDB server (`influxd`) is not running. +{{% /note %}} + +## Usage +```sh +influxd recovery org create [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :--- | :------------ | :------------------------------------------------------------- | :--------: | +| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string | +| `-h` | `--help` | Help for `create` | | +| | `--org` | Organization name | string | + +## Examples + +##### Create a new organization directly on disk +```sh +influxd recovery org create --org example-org +``` \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/org/list.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/org/list.md new file mode 100644 index 000000000..7aa5e7d35 --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/org/list.md @@ -0,0 +1,28 @@ +--- +title: influxd recovery org list +description: > + The `influxd recovery org list` command lists organizations stored on disk and + outputs data associated with each organization. +menu: + influxdb_2_5_ref: + parent: influxd recovery org +weight: 401 +--- + +The `influxd recovery org list` command lists organizations stored on disk and +outputs data associated with each organization. + +{{% note %}} +This command can only be executed when the InfluxDB server (`influxd`) is not running. +{{% /note %}} + +## Usage +```sh +influxd recovery org list [flags] +``` + +## Flags +| Flag | | Description | Input Type | +| :--- | :------------ | :------------------------------------------------------------- | :--------: | +| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string | +| `-h` | `--help` | Help for `list` | | \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/user/_index.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/_index.md new file mode 100644 index 000000000..99278286f --- /dev/null +++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/_index.md @@ -0,0 +1,32 @@ +--- +title: influxd recovery user +description: > + The `influxd recovery user` command and subcommands manage on-disk user + data for recovery purposes. +menu: +menu: + influxdb_2_5_ref: + parent: influxd recovery +weight: 301 +--- + +The `influxd recovery user` command and subcommands manage on-disk user +data for recovery purposes. 
+
+## Usage
+```sh
+influxd recovery user [flags]
+influxd recovery user [command]
+```
+
+## Subcommands
+| Subcommand | Description |
+| :-------------------------------------------------------------------- | :---------------- |
+| [create](/influxdb/v2.5/reference/cli/influxd/recovery/user/create/) | Create new user |
+| [list](/influxdb/v2.5/reference/cli/influxd/recovery/user/list/) | List users |
+| [update](/influxdb/v2.5/reference/cli/influxd/recovery/user/update/) | Update a password |
+
+## Flags
+| Flag | | Description |
+| :--- | :------- | :-------------- |
+| `-h` | `--help` | Help for `user` |
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/user/create.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/create.md
new file mode 100644
index 000000000..2f54d1bc1
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/create.md
@@ -0,0 +1,39 @@
+---
+title: influxd recovery user create
+description: >
+  The `influxd recovery user create` command creates a new user directly
+  on disk for recovery purposes.
+menu:
+  influxdb_2_5_ref:
+    parent: influxd recovery user
+weight: 401
+---
+
+The `influxd recovery user create` command creates a new user directly
+on disk for recovery purposes.
+
+{{% note %}}
+This command can only be executed when the InfluxDB server (`influxd`) is not running.
+{{% /note %}}
+
+## Usage
+```sh
+influxd recovery user create [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :------------ | :------------------------------------------------------------- | :--------: |
+| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string |
+| `-h` | `--help` | Help for `create` | |
+| | `--password` | Password for the new user | string |
+| | `--username` | Username of the new user | string |
+
+## Examples
+
+##### Create a new user directly on disk
+```sh
+influxd recovery user create \
+  --username example-username \
+  --password ExAmPL3-paS5W0rD
+```
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/user/list.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/list.md
new file mode 100644
index 000000000..bc861f48c
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/list.md
@@ -0,0 +1,28 @@
+---
+title: influxd recovery user list
+description: >
+  The `influxd recovery user list` command lists users stored on disk and
+  outputs data associated with each user.
+menu:
+  influxdb_2_5_ref:
+    parent: influxd recovery user
+weight: 401
+---
+
+The `influxd recovery user list` command lists users stored on disk and
+outputs data associated with each user.
+
+{{% note %}}
+This command can only be executed when the InfluxDB server (`influxd`) is not running.
+{{% /note %}}
+
+## Usage
+```sh
+influxd recovery user list [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :------------ | :------------------------------------------------------------- | :--------: |
+| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string |
+| `-h` | `--help` | Help for `list` | |
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/recovery/user/update.md b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/update.md
new file mode 100644
index 000000000..4c2fa115e
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/recovery/user/update.md
@@ -0,0 +1,38 @@
+---
+title: influxd recovery user update
+description: >
+  The `influxd recovery user update` command lets you change your password if you forget your credentials.
+menu:
+  influxdb_2_5_ref:
+    parent: influxd recovery user
+weight: 401
+---
+
+Use `influxd recovery user update` to update a user's password directly on disk.
+This is useful if you forget your credentials and need to regain access.
+To retrieve all usernames in the system, use [`influxd recovery user list`](/influxdb/v2.5/reference/cli/influxd/recovery/user/list/),
+or use [`influxd recovery user create`](/influxdb/v2.5/reference/cli/influxd/recovery/user/create/) to create a new user for recovery purposes.
+
+{{% note %}}
+This command can only be executed when the InfluxDB server (`influxd`) is not running.
+{{% /note %}}
+
+## Usage
+```sh
+influxd recovery user update [flags]
+```
+
+## Flags
+| Flag | | Description | Input Type |
+| :--- | :--------------------- | :------------------------------------------------------------- | :--------: |
+| | `--bolt-path` | Path to the BoltDB file (default `~/.influxdbv2/influxd.bolt`) | string |
+| `-h` | `--help` | Help for `update` | |
+| | `--id` or `--username` | ID or username of an existing user | string |
+| | `--password` | New password for the specified user | string |
+
+## Examples
+
+##### Update a user password
+
+```sh
+influxd recovery user update \
+  --username example-username \
+  --password ExAmPL3-paS5W0rD
+```
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/cli/influxd/run.md b/content/influxdb/v2.5/reference/cli/influxd/run.md
new file mode 100644
index 000000000..0014f6a88
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/run.md
@@ -0,0 +1,40 @@
+---
+title: influxd run
+description: The `influxd run` command is the default `influxd` command and starts the influxd server.
+influxdb/v2.5/tags: [influxd, cli]
+menu:
+  influxdb_2_5_ref:
+    parent: influxd
+weight: 201
+related:
+  - /influxdb/v2.5/reference/config-options/
+products: [oss]
+---
+
+The `influxd run` command is the default command for `influxd`.
+It starts and runs all the processes necessary for InfluxDB to function.
+
+## Usage
+
+```
+influxd run [flags]
+```
+
+
+{{% note %}}
+Because `run` is the default command for `influxd`, the following commands are the same:
+
+```bash
+influxd
+influxd run
+```
+{{% /note %}}
+
+{{% note %}}
+For information about other available InfluxDB configuration methods, see
+[InfluxDB configuration options](/influxdb/v2.5/reference/config-options/).
+{{% /note %}}
+
+## Flags
+
+{{% cli/influxd-flags %}}
diff --git a/content/influxdb/v2.5/reference/cli/influxd/upgrade.md b/content/influxdb/v2.5/reference/cli/influxd/upgrade.md
new file mode 100644
index 000000000..6fc29e49b
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/upgrade.md
@@ -0,0 +1,59 @@
+---
+title: influxd upgrade
+description: The `influxd upgrade` command upgrades an InfluxDB 1.x instance to 2.x.
+menu:
+  influxdb_2_5_ref:
+    parent: influxd
+weight: 201
+products: [oss]
+---
+
+Use the `influxd upgrade` command to upgrade an instance of InfluxDB 1.x to InfluxDB {{< current-version >}}.
+This command copies all data in [databases](/influxdb/v1.8/concepts/glossary/#database) and
+[retention policies](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) (used in 1.x)
+over to [buckets](/influxdb/v2.5/reference/glossary/#bucket) (used in {{< current-version >}}).
+
+{{% warn %}}
+Be sure to back up all data before upgrading with `influxd upgrade`.
+{{% /warn %}}
+
+This command performs the following actions:
+
+1. Creates the InfluxDB 2.x configuration file using 1.x configuration file options.
+   Unsupported 1.x options are reported to standard output.
+   If the configuration file is not available, the 1.x database folder can be passed via the `--v1-dir` flag.
+2. Copies and upgrades 1.x database files.
+
+The target 2.x database directory is specified by the `--engine-path` option.
+If changed, the bolt path can be specified by the `--bolt-path` option.
+
+## Usage
+
+```
+influxd upgrade [flags]
+influxd upgrade [command]
+```
+
+## Flags
+
+| Flag | | Description | Input type |
+|:-----|:------------------------ |:-----------------------------------------------------------------------------------------------------------|:----------:|
+| `-m` | `--bolt-path` | Path for boltdb database (default `~/.influxdbv2/influxd.bolt`) | string |
+| `-b` | `--bucket` | Primary bucket name | string |
+| | `--config-file` | Custom InfluxDB 1.x config file path (default `~/.influxdb/influxdb.conf`) | string |
+| | `--continuous-query-export-path` | Path for exported 1.x continuous queries (default `~/continuous_queries.txt`) | string |
+| `-e` | `--engine-path` | Path for persistent engine files (default `~/.influxdbv2/engine`) | string |
+| `-f` | `--force` | Skip the confirmation prompt | |
+| `-h` | `--help` | Help for `influxd upgrade` | |
+| `-c` | `--influx-configs-path` | Path for 2.x `influx` CLI configurations file (default `~/.influxdbv2/configs`) | |
+| | `--log-level` | Supported log levels are `debug`, `info`, `warn`, and `error` (default `info`) | string |
+| | `--log-path` | Custom log file path (default `~/upgrade.log`) | string |
+| `-o` | `--org` | Primary organization name | string |
+| | `--overwrite-existing-v2` | Overwrite existing files at the output paths instead of aborting the upgrade process | |
+| `-p` | `--password` | Password for username | string |
+| `-r` | `--retention` | Duration bucket will retain data (default `0`; retains data infinitely) | string |
+| `-t` | `--token` | Token for username. If not specified, a token is auto-generated. | string |
+| `-u` | `--username` | Primary username | string |
+| | `--v1-dir` | Path to source 1.x `db` directory containing `meta`, `data`, and `wal` sub-folders (default `~/.influxdb`) | string |
+| | `--v2-config-path` | Destination path for upgraded 2.x configuration file (default `~/.influxdbv2/config.toml`) | string |
+| `-v` | `--verbose` | Verbose output | |
diff --git a/content/influxdb/v2.5/reference/cli/influxd/version.md b/content/influxdb/v2.5/reference/cli/influxd/version.md
new file mode 100644
index 000000000..9a1ee0cd8
--- /dev/null
+++ b/content/influxdb/v2.5/reference/cli/influxd/version.md
@@ -0,0 +1,24 @@
+---
+title: influxd version
+description: The `influxd version` command outputs the current version of InfluxDB.
+influxdb/v2.5/tags: [influxd, cli]
+menu:
+  influxdb_2_5_ref:
+    parent: influxd
+weight: 202
+products: [oss]
+---
+
+The `influxd version` command outputs the current version of InfluxDB.
+
+## Usage
+
+```
+influxd version [flags]
+```
+
+## Flags
+
+| Flag | | Description |
+|:---- |:--- |:----------- |
+| `-h` | `--help` | Help for the `version` command |
diff --git a/content/influxdb/v2.5/reference/config-options.md b/content/influxdb/v2.5/reference/config-options.md
new file mode 100644
index 000000000..78cca20a5
--- /dev/null
+++ b/content/influxdb/v2.5/reference/config-options.md
@@ -0,0 +1,3550 @@
+---
+title: InfluxDB configuration options
+description: >
+  Customize your InfluxDB configuration by using [`influxd`](/influxdb/v2.5/reference/cli/influxd/)
+  configuration flags, setting environment variables, or defining configuration
+  options in a configuration file.
+menu:
+  influxdb_2_5_ref:
+    name: Configuration options
+weight: 3
+products: [oss]
+related:
+  - /influxdb/v2.5/reference/cli/influxd
+---
+
+Customize your InfluxDB configuration by using [`influxd`](/influxdb/v2.5/reference/cli/influxd/)
+configuration flags, setting environment variables, or defining configuration
+options in a configuration file.
+
+- [View your runtime server configuration](#view-your-runtime-server-configuration)
+- [Configuration precedence](#configuration-precedence)
+- [InfluxDB configuration file](#influxdb-configuration-file)
+- [Configuration options](#configuration-options)
+
+### View your runtime server configuration
+
+Use the `influx` CLI or the InfluxDB API to get the runtime server configuration of your InfluxDB instance.
+
+Server configuration commands require an [Operator token](/influxdb/v2.5/security/tokens/#operator-token).
+
+#### View your server configuration with the CLI
+
+Use the [`influx server-config` command](/influxdb/v2.5/reference/cli/influx/server-config/)
+to retrieve your runtime server configuration.
+
+```sh
+influx server-config
+```
+
+#### View your server configuration with the API
+
+Use the `/api/v2/config` InfluxDB API endpoint to retrieve your runtime server configuration.
+
+[{{< api-endpoint method="GET" endpoint="http://localhost:8086/api/v2/config" >}}](/influxdb/v2.5/api/#operation/GetConfig)
+
+### Configuration precedence
+InfluxDB honors configuration settings using the following precedence:
+
+1. `influxd` flags
+2. Environment variables
+3. Configuration file settings
+
+### InfluxDB configuration file
+
+When `influxd` starts, it checks for a file named `config.*` **in the current working directory**.
+The file extension depends on the syntax of the configuration file.
+InfluxDB configuration files support the following syntaxes: + +- **YAML** (`.yaml`, `.yml`) +- **TOML** (`.toml`) +- **JSON** (`.json`) + +To customize the directory path of the configuration file, set the `INFLUXD_CONFIG_PATH` +environment variable to your custom path. + +```sh +export INFLUXD_CONFIG_PATH=/path/to/custom/config/directory +``` + +On startup, `influxd` will check for a `config.*` in the `INFLUXD_CONFIG_PATH` directory. + +##### Example configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yaml +query-concurrency: 20 +query-queue-size: 15 +secret-store: vault +session-length: 120 +tls-cert: /path/to/influxdb.crt +tls-key: /path/to/influxdb.key +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +query-concurrency = 20 +query-queue-size = 15 +secret-store = "vault" +session-length = 120 +tls-cert = "/path/to/influxdb.crt" +tls-key = "/path/to/influxdb.key" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "query-concurrency": 20, + "query-queue-size": 15, + "secret-store": "vault", + "session-length": 120, + "tls-cert": "/path/to/influxdb.crt", + "tls-key": "/path/to/influxdb.key" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% note %}} +Only non-default settings need to be defined in the configuration file. +{{% /note %}} + +## Configuration options + +To configure InfluxDB, use the following configuration options when starting the +[`influxd` service](/influxdb/v2.5/reference/cli/influxd): + +- [assets-path](#assets-path) +- [bolt-path](#bolt-path) +- [e2e-testing](#e2e-testing) +- [engine-path](#engine-path) +- [feature-flags](#feature-flags) +- [flux-log-enabled](#flux-log-enabled) +- [hardening-enabled](#hardening-enabled) +- [http-bind-address](#http-bind-address) +- [http-idle-timeout](#http-idle-timeout) +- [http-read-header-timeout](#http-read-header-timeout) +- [http-read-timeout](#http-read-timeout) +- [http-write-timeout](#http-write-timeout) +- [influxql-max-select-buckets](#influxql-max-select-buckets) +- [influxql-max-select-point](#influxql-max-select-point) +- [influxql-max-select-series](#influxql-max-select-series) +- [instance-id](#instance-id) +- [log-level](#log-level) +- [metrics-disabled](#metrics-disabled) +- [nats-max-payload-bytes](#nats-max-payload-bytes) - (deprecated) +- [nats-port](#nats-port) - (deprecated) +- [no-tasks](#no-tasks) +- [pprof-disabled](#pprof-disabled) +- [query-concurrency](#query-concurrency) +- [query-initial-memory-bytes](#query-initial-memory-bytes) +- [query-max-memory-bytes](#query-max-memory-bytes) +- [query-memory-bytes](#query-memory-bytes) +- [query-queue-size](#query-queue-size) +- [reporting-disabled](#reporting-disabled) +- [secret-store](#secret-store) +- [session-length](#session-length) +- [session-renew-disabled](#session-renew-disabled) +- [sqlite-path](#sqlite-path) +- [storage-cache-max-memory-size](#storage-cache-max-memory-size) +- [storage-cache-snapshot-memory-size](#storage-cache-snapshot-memory-size) +- [storage-cache-snapshot-write-cold-duration](#storage-cache-snapshot-write-cold-duration) +- [storage-compact-full-write-cold-duration](#storage-compact-full-write-cold-duration) +- [storage-compact-throughput-burst](#storage-compact-throughput-burst) +- [storage-max-concurrent-compactions](#storage-max-concurrent-compactions) +- [storage-max-index-log-file-size](#storage-max-index-log-file-size) +- 
[storage-no-validate-field-size](#storage-no-validate-field-size) +- [storage-retention-check-interval](#storage-retention-check-interval) +- [storage-series-file-max-concurrent-snapshot-compactions](#storage-series-file-max-concurrent-snapshot-compactions) +- [storage-series-id-set-cache-size](#storage-series-id-set-cache-size) +- [storage-shard-precreator-advance-period](#storage-shard-precreator-advance-period) +- [storage-shard-precreator-check-interval](#storage-shard-precreator-check-interval) +- [storage-tsm-use-madv-willneed](#storage-tsm-use-madv-willneed) +- [storage-validate-keys](#storage-validate-keys) +- [storage-wal-fsync-delay](#storage-wal-fsync-delay) +- [storage-wal-max-concurrent-writes](#storage-wal-max-concurrent-writes) +- [storage-wal-max-write-delay](#storage-wal-max-write-delay) +- [storage-write-timeout](#storage-write-timeout) +- [store](#store) +- [testing-always-allow-setup](#testing-always-allow-setup) +- [tls-cert](#tls-cert) +- [tls-key](#tls-key) +- [tls-min-version](#tls-min-version) +- [tls-strict-ciphers](#tls-strict-ciphers) +- [tracing-type](#tracing-type) +- [ui-disabled](#ui-disabled) +- [vault-addr](#vault-addr) +- [vault-cacert](#vault-cacert) +- [vault-capath](#vault-capath) +- [vault-client-cert](#vault-client-cert) +- [vault-client-key](#vault-client-key) +- [vault-client-timeout](#vault-client-timeout) +- [vault-max-retries](#vault-max-retries) +- [vault-skip-verify](#vault-skip-verify) +- [vault-tls-server-name](#vault-tls-server-name) +- [vault-token](#vault-token) + +--- + +### assets-path +Override the default InfluxDB user interface (UI) assets by serving assets from the specified directory. +_Typically, InfluxData internal use only._ + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :-------------------- | :---------------- | +| `--assets-path` | `INFLUXD_ASSETS_PATH` | `assets-path` | + +###### influxd flag +```sh +influxd --assets-path=/path/to/custom/assets-dir +``` + +###### Environment variable +```sh +export INFLUXD_ASSETS_PATH=/path/to/custom/assets-dir +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +assets-path: /path/to/custom/assets-dir +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +assets-path = "/path/to/custom/assets-dir" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "assets-path": "/path/to/custom/assets-dir" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### bolt-path +Path to the [BoltDB](https://github.com/boltdb/bolt) database. +BoltDB is a key value store written in Go. +InfluxDB uses BoltDB to store data including organization and +user information, UI data, REST resources, and other key value data. 
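+
+Because this file holds metadata the instance cannot run without, keep it on durable storage and include it in your backups.
+A minimal sketch with hypothetical paths, set alongside [sqlite-path](#sqlite-path) (described below):
+
+```sh
+# Hypothetical locations; keep both metadata stores on the same durable volume.
+influxd \
+  --bolt-path=/var/lib/influxdb2/influxd.bolt \
+  --sqlite-path=/var/lib/influxdb2/influxd.sqlite
+```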
+ +**Default:** `~/.influxdbv2/influxd.bolt` + +| influxd flag | Environment variable | Configuration key | +| :------------ | :------------------- | :---------------- | +| `--bolt-path` | `INFLUXD_BOLT_PATH` | `bolt-path` | + +###### influxd flag +```sh +influxd --bolt-path=~/.influxdbv2/influxd.bolt +``` + +###### Environment variable +```sh +export INFLUXD_BOLT_PATH=~/.influxdbv2/influxd.bolt +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +bolt-path: ~/.influxdbv2/influxd.bolt +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +bolt-path = "~/.influxdbv2/influxd.bolt" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "bolt-path": "~/.influxdbv2/influxd.bolt" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### e2e-testing +Add a `/debug/flush` endpoint to the InfluxDB HTTP API to clear stores. +InfluxData uses this endpoint in end-to-end testing. + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :-------------------- | :---------------- | +| `--e2e-testing` | `INFLUXD_E2E_TESTING` | `e2e-testing` | + +###### influxd flag +```sh +influxd --e2e-testing +``` + +###### Environment variable +```sh +export INFLUXD_E2E_TESTING=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +e2e-testing: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +e2e-testing = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "e2e-testing": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### engine-path +Path to persistent storage engine files where InfluxDB stores all +Time-Structure Merge Tree (TSM) data on disk. + +**Default:** `~/.influxdbv2/engine` + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :-------------------- | :---------------- | +| `--engine-path` | `INFLUXD_ENGINE_PATH` | `engine-path` | + +###### influxd flag +```sh +influxd --engine-path=~/.influxdbv2/engine +``` + +###### Environment variable +```sh +export INFLUXD_ENGINE_PATH=~/.influxdbv2/engine +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +engine-path: ~/.influxdbv2/engine +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +engine-path = "~/.influxdbv2/engine" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "engine-path": "~/.influxdbv2/engine" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### feature-flags +Enable, disable, or override default values for feature flags. + +{{% note %}} +Feature flags are used to develop and test experimental features and are +intended for internal use only. 
+{{% /note %}}
+
+| influxd flag | Environment variable | Configuration key |
+| :---------------- | :---------------------- | :---------------- |
+| `--feature-flags` | `INFLUXD_FEATURE_FLAGS` | `feature-flags` |
+
+###### influxd flag
+```sh
+influxd --feature-flags flag1=value1,flag2=value2
+```
+
+###### Environment variable
+```sh
+export INFLUXD_FEATURE_FLAGS="{\"flag1\":\"value1\",\"flag2\":\"value2\"}"
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+feature-flags:
+  flag1: "value1"
+  flag2: "value2"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+[feature-flags]
+  flag1 = "value1"
+  flag2 = "value2"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "feature-flags": {
+    "flag1": "value1",
+    "flag2": "value2"
+  }
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### flux-log-enabled
+
+Log detailed information for Flux queries, including the following log fields:
+
+- `compiler_type`: Compiler used for processing the query (will always be Flux).
+- `response_size`: Size of the response, in bytes.
+- `query`: The textual representation of the query.
+- `err`: Errors encountered while processing the query.
+- `stat_total_duration`: Total duration to process the query.
+- `stat_compile_duration`: Duration to compile the query.
+- `stat_execute_duration`: Duration to execute the query.
+- `stat_max_allocated`: Maximum amount of memory allocated while processing the query, in bytes.
+- `stat_total_allocated`: Total amount of memory allocated while processing the query, in bytes. This includes memory that was freed and then used again.
+
+**Default:** `false`
+
+| influxd flag | Environment variable | Configuration key |
+| :------------------- | :------------------------- | :----------------- |
+| `--flux-log-enabled` | `INFLUXD_FLUX_LOG_ENABLED` | `flux-log-enabled` |
+
+###### influxd flag
+```sh
+influxd --flux-log-enabled
+```
+
+###### Environment variable
+```sh
+export INFLUXD_FLUX_LOG_ENABLED=true
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+flux-log-enabled: true
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+flux-log-enabled = true
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "flux-log-enabled": true
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### hardening-enabled
+
+Enable [additional security features](/influxdb/v2.5/security/enable-hardening/)
+in InfluxDB.
+
+**Default:** `false`
+
+| influxd flag | Environment variable | Configuration key |
+| :-------------------- | :-------------------------- | :------------------ |
+| `--hardening-enabled` | `INFLUXD_HARDENING_ENABLED` | `hardening-enabled` |
+
+###### influxd flag
+```sh
+influxd --hardening-enabled
+```
+
+###### Environment variable
+```sh
+export INFLUXD_HARDENING_ENABLED=true
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+hardening-enabled: true
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+hardening-enabled = true
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "hardening-enabled": true
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### http-bind-address
+Bind address for the InfluxDB HTTP API.
+Customize the URL and port for the InfluxDB API and UI.
+
+**Default:** `:8086`
+
+| influxd flag | Environment variable | Configuration key |
+| :-------------------- | :-------------------------- | :------------------ |
+| `--http-bind-address` | `INFLUXD_HTTP_BIND_ADDRESS` | `http-bind-address` |
+
+###### influxd flag
+```sh
+influxd --http-bind-address=:8086
+```
+
+###### Environment variable
+```sh
+export INFLUXD_HTTP_BIND_ADDRESS=:8086
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+http-bind-address: ":8086"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+http-bind-address = ":8086"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "http-bind-address": ":8086"
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### http-idle-timeout
+Maximum duration the server should keep established connections alive while waiting for new requests.
+Set to `0` for no timeout.
+
+**Default:** `3m0s`
+
+| influxd flag | Environment variable | Configuration key |
+| :-------------------- | :-------------------------- | :------------------ |
+| `--http-idle-timeout` | `INFLUXD_HTTP_IDLE_TIMEOUT` | `http-idle-timeout` |
+
+###### influxd flag
+```sh
+influxd --http-idle-timeout=3m0s
+```
+
+###### Environment variable
+```sh
+export INFLUXD_HTTP_IDLE_TIMEOUT=3m0s
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+http-idle-timeout: 3m0s
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+http-idle-timeout = "3m0s"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "http-idle-timeout": "3m0s"
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### http-read-header-timeout
+Maximum duration the server should try to read HTTP headers for new requests.
+Set to `0` for no timeout.
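+
+This option is often tuned together with the related [http-read-timeout](#http-read-timeout)
+and [http-write-timeout](#http-write-timeout) options described below.
+A minimal sketch with hypothetical values:
+
+```sh
+# Hypothetical timeouts; choose values that fit your slowest expected clients.
+influxd \
+  --http-read-header-timeout=10s \
+  --http-read-timeout=30s \
+  --http-write-timeout=30s
+```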
+ +**Default:** `10s` + +| influxd flag | Environment variable | Configuration key | +| :--------------------------- | :--------------------------------- | :------------------------- | +| `--http-read-header-timeout` | `INFLUXD_HTTP_READ_HEADER_TIMEOUT` | `http-read-header-timeout` | + +###### influxd flag +```sh +influxd --http-read-header-timeout=10s +``` + +###### Environment variable +```sh +export INFLUXD_HTTP_READ_HEADER_TIMEOUT=10s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +http-read-header-timeout: 10s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +http-read-header-timeout = "10s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "http-read-header-timeout": "10s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### http-read-timeout +Maximum duration the server should try to read the entirety of new requests. +Set to `0` for no timeout. + +**Default:** `0` + +{{% note %}} +#### Set timeouts specific to your workload +Although no `http-read-timeout` is set by default, we **strongly recommend** +setting a timeout specific to your workload. +HTTP timeouts protect against large amounts of open connections that could +potentially hurt performance. +{{% /note %}} + +| influxd flag | Environment variable | Configuration key | +| :-------------------- | :-------------------------- | :------------------ | +| `--http-read-timeout` | `INFLUXD_HTTP_READ_TIMEOUT` | `http-read-timeout` | + +###### influxd flag +```sh +influxd --http-read-timeout=10s +``` + +###### Environment variable +```sh +export INFLUXD_HTTP_READ_TIMEOUT=10s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +http-read-timeout: 10s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +http-read-timeout = "10s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "http-read-timeout": "10s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### http-write-timeout +Maximum duration the server should spend processing and responding to write requests. +Set to `0` for no timeout. + +**Default:** `0` + +{{% note %}} +#### Set timeouts specific to your workload +Although no `http-write-timeout` is set by default, we **strongly recommend** +setting a timeout specific to your workload. +HTTP timeouts protect against large amounts of open connections that could +potentially hurt performance. 
+{{% /note %}} + +| influxd flag | Environment variable | Configuration key | +| :--------------------- | :--------------------------- | :------------------- | +| `--http-write-timeout` | `INFLUXD_HTTP_WRITE_TIMEOUT` | `http-write-timeout` | + +###### influxd flag +```sh +influxd --http-write-timeout=10s +``` + +###### Environment variable +```sh +export INFLUXD_HTTP_WRITE_TIMEOUT=10s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +http-write-timeout: 10s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +http-write-timeout = "10s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "http-write-timeout": "10s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### influxql-max-select-buckets +Maximum number of group by time buckets a `SELECT` statement can create. +`0` allows an unlimited number of buckets. + +**Default:** `0` + +| influxd flag | Environment variable | Configuration key | +| :------------------------------ | :------------------------------------ | :---------------------------- | +| `--influxql-max-select-buckets` | `INFLUXD_INFLUXQL_MAX_SELECT_BUCKETS` | `influxql-max-select-buckets` | + +###### influxd flag +```sh +influxd --influxql-max-select-buckets=0 +``` + +###### Environment variable +```sh +export INFLUXD_INFLUXQL_MAX_SELECT_BUCKETS=0 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +influxql-max-select-buckets: 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +influxql-max-select-buckets = 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "influxql-max-select-buckets": 0 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### influxql-max-select-point +Maximum number of points a `SELECT` statement can process. +`0` allows an unlimited number of points. +InfluxDB checks the point count every second (so queries exceeding the maximum aren’t immediately aborted). + +**Default:** `0` + +| influxd flag | Environment variable | Configuration key | +| :---------------------------- | :---------------------------------- | :-------------------------- | +| `--influxql-max-select-point` | `INFLUXD_INFLUXQL_MAX_SELECT_POINT` | `influxql-max-select-point` | + +###### influxd flag +```sh +influxd --influxql-max-select-point=0 +``` + +###### Environment variable +```sh +export INFLUXD_INFLUXQL_MAX_SELECT_POINT=0 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +influxql-max-select-point: 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +influxql-max-select-point = 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "influxql-max-select-point": 0 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### influxql-max-select-series +Maximum number of series a `SELECT` statement can return. +`0` allows an unlimited number of series. 
+ +**Default:** `0` + +| influxd flag | Environment variable | Configuration key | +| :----------------------------- | :----------------------------------- | :--------------------------- | +| `--influxql-max-select-series` | `INFLUXD_INFLUXQL_MAX_SELECT_SERIES` | `influxql-max-select-series` | + +###### influxd flag +```sh +influxd --influxql-max-select-series=0 +``` + +###### Environment variable +```sh +export INFLUXD_INFLUXQL_MAX_SELECT_SERIES=0 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +influxql-max-select-series: 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +influxql-max-select-series = 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "influxql-max-select-series": 0 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### instance-id + +Identifies edge nodes during replication, and prevents collisions if two edge nodes write the same `measurement,tagset`. + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :-------------------- | :---------------- | +| `--instance-id` | `INFLUXD_INSTANCE_ID` | `instance-id` | + +###### influxd flag +```sh +influxd --instance-id=:8086 +``` + +###### Environment variable +```sh +export INFLUXD_INSTANCE_ID=:8086 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +instance-id: ":8086" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +instance-id = ":8086" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "instance-id": ":8086" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### log-level +Log output level. +InfluxDB outputs log entries with severity levels greater than or equal to the level specified. + +**Options:** `debug`, `info`, `error` +**Default:** `info` + +| influxd flag | Environment variable | Configuration key | +| :------------ | :------------------- | :---------------- | +| `--log-level` | `INFLUXD_LOG_LEVEL` | `log-level` | + +###### influxd flag +```sh +influxd --log-level=info +``` + +###### Environment variable +```sh +export INFLUXD_LOG_LEVEL=info +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +log-level: info +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +log-level = "info" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "log-level": "info" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### metrics-disabled +Disable the HTTP `/metrics` endpoint which exposes [internal InfluxDB metrics](/influxdb/v2.5/reference/internals/metrics/). 
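+
+While the endpoint is enabled (the default), you can inspect the metrics directly.
+A quick check, assuming InfluxDB is listening on the default `localhost:8086`:
+
+```sh
+# Returns internal metrics in Prometheus exposition format.
+curl http://localhost:8086/metrics
+```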
+ +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :------------------- | :------------------------- | :----------------- | +| `--metrics-disabled` | `INFLUXD_METRICS_DISABLED` | `metrics-disabled` | + +###### influxd flag +```sh +influxd --metrics-disabled +``` + +###### Environment variable +```sh +export INFLUXD_METRICS_DISABLED=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +metrics-disabled: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +metrics-disabled = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "metrics-disabled": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### nats-max-payload-bytes + +{{% warn %}} +`nats-max-payload-bytes` was **deprecated in InfluxDB 2.2** and no longer has any effect. +{{% /warn %}} + +Maximum number of bytes allowed in a NATS message payload. + +**Default:** `1048576` + +| influxd flag | Environment variable | Configuration key | +| :------------------------- | :------------------------------- | :----------------------- | +| `--nats-max-payload-bytes` | `INFLUXD_NATS_MAX_PAYLOAD_BYTES` | `nats-max-payload-bytes` | + +###### influxd flag +```sh +influxd --nats-max-payload-bytes=1048576 +``` + +###### Environment variable +```sh +export INFLUXD_NATS_MAX_PAYLOAD_BYTES=1048576 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +nats-max-payload-bytes: 1048576 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +nats-max-payload-bytes = 1048576 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "nats-max-payload-bytes": 1048576 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### nats-port + +{{% warn %}} +`nats-port` was **deprecated in InfluxDB 2.2** and no longer has any effect. +{{% /warn %}} + +Port for the NATS streaming server. `-1` selects a random port. + +**Default:** `-1` + +| influxd flag | Environment variable | Configuration key | +| :------------ | :------------------- | :---------------- | +| `--nats-port` | `INFLUXD_NATS_PORT` | `nats-port` | + +###### influxd flag +```sh +influxd --nats-port=-1 +``` + +###### Environment variable +```sh +export INFLUXD_NATS_PORT=-1 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +nats-port: -1 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +nats-port = -1 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "nats-port": -1 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### no-tasks +Disable the task scheduler. +If problematic tasks prevent InfluxDB from starting, use this option to start +InfluxDB without scheduling or executing tasks. 
+ +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :----------- | :------------------- | :---------------- | +| `--no-tasks` | `INFLUXD_NO_TASKS` | `no-tasks` | + +###### influxd flag +```sh +influxd --no-tasks +``` + +###### Environment variable +```sh +export INFLUXD_NO_TASKS=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +no-tasks: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +no-tasks = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "no-tasks": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### pprof-disabled +Disable the `/debug/pprof` HTTP endpoint. +This endpoint provides runtime profiling data and can be helpful when debugging. + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :----------------- | :----------------------- | :---------------- | +| `--pprof-disabled` | `INFLUXD_PPROF_DISABLED` | `pprof-disabled` | + +###### influxd flag +```sh +influxd --pprof-disabled +``` + +###### Environment variable +```sh +export INFLUXD_PPROF_DISABLED=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +pprof-disabled: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +pprof-disabled = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "pprof-disabled": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### query-concurrency +Number of queries allowed to execute concurrently. + +**Default:** `10` + +| influxd flag | Environment variable | Configuration key | +| :-------------------- | :-------------------------- | :------------------ | +| `--query-concurrency` | `INFLUXD_QUERY_CONCURRENCY` | `query-concurrency` | + +###### influxd flag +```sh +influxd --query-concurrency=10 +``` + +###### Environment variable +```sh +export INFLUXD_QUERY_CONCURRENCY=10 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +query-concurrency: 10 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +query-concurrency = 10 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "query-concurrency": 10 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### query-initial-memory-bytes +Initial bytes of memory allocated for a query. 
+ +**Default:** _equal to_ [query-memory-bytes](#query-memory-bytes) + +| influxd flag | Environment variable | Configuration key | +| :----------------------------- | :----------------------------------- | :--------------------------- | +| `--query-initial-memory-bytes` | `INFLUXD_QUERY_INITIAL_MEMORY_BYTES` | `query-initial-memory-bytes` | + +###### influxd flag +```sh +influxd --query-initial-memory-bytes=10485760 +``` + +###### Environment variable +```sh +export INFLUXD_QUERY_INITIAL_MEMORY_BYTES=10485760 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +query-initial-memory-bytes: 10485760 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +query-initial-memory-bytes = 10485760 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "query-initial-memory-bytes": 10485760 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### query-max-memory-bytes +Maximum total bytes of memory allowed for queries. + +**Default:** _equal to_ [query-concurrency](#query-concurrency) × [query-memory-bytes](#query-memory-bytes) + +| influxd flag | Environment variable | Configuration key | +| :------------------------- | :------------------------------- | :----------------------- | +| `--query-max-memory-bytes` | `INFLUXD_QUERY_MAX_MEMORY_BYTES` | `query-max-memory-bytes` | + +###### influxd flag +```sh +influxd --query-max-memory-bytes=104857600 +``` + +###### Environment variable +```sh +export INFLUXD_QUERY_MAX_MEMORY_BYTES=104857600 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +query-max-memory-bytes: 104857600 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +query-max-memory-bytes = 104857600 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "query-max-memory-bytes": 104857600 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### query-memory-bytes +Maximum bytes of memory allowed for a single query. + +**Default:** _unlimited_ + +{{% note %}} +Must be greater than or equal to [query-initial-memory-bytes](#query-initial-memory-bytes). +{{% /note %}} + +| influxd flag | Environment variable | Configuration key | +| :--------------------- | :--------------------------- | :------------------- | +| `--query-memory-bytes` | `INFLUXD_QUERY_MEMORY_BYTES` | `query-memory-bytes` | + +###### influxd flag +```sh +influxd --query-memory-bytes=10485760 +``` + +###### Environment variable +```sh +export INFLUXD_QUERY_MEMORY_BYTES=10485760 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +query-memory-bytes: 10485760 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +query-memory-bytes = 10485760 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "query-memory-bytes": 10485760 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### query-queue-size +Maximum number of queries allowed in execution queue. +When queue limit is reached, new queries are rejected. 
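+
+This option is usually sized together with [query-concurrency](#query-concurrency);
+queries only queue once all concurrency slots are in use.
+A minimal sketch with hypothetical values:
+
+```sh
+# Hypothetical sizing: run up to 20 queries at once and queue up to 40 more.
+influxd \
+  --query-concurrency=20 \
+  --query-queue-size=40
+```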
+ +**Default:** `10` + +| influxd flag | Environment variable | Configuration key | +| :------------------- | :------------------------- | :----------------- | +| `--query-queue-size` | `INFLUXD_QUERY_QUEUE_SIZE` | `query-queue-size` | + +###### influxd flag +```sh +influxd --query-queue-size=10 +``` + +###### Environment variable +```sh +export INFLUXD_QUERY_QUEUE_SIZE=10 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +query-queue-size: 10 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +query-queue-size = 10 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "query-queue-size": 10 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### reporting-disabled +Disables sending telemetry data to InfluxData. +The [InfluxData telemetry](https://www.influxdata.com/telemetry) page provides +information about what data is collected and how InfluxData uses it. + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :--------------------- | :--------------------------- | :------------------- | +| `--reporting-disabled` | `INFLUXD_REPORTING_DISABLED` | `reporting-disabled` | + +###### influxd flag +```sh +influxd --reporting-disabled +``` + +###### Environment variable +```sh +export INFLUXD_REPORTING_DISABLED=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +reporting-disabled: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +reporting-disabled = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "reporting-disabled": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### secret-store +Specifies the data store for secrets such as passwords and tokens. +Store secrets in either the InfluxDB [internal BoltDB](#bolt-path) +or in [Vault](https://www.vaultproject.io/). + +**Options:** `bolt`, `vault` +**Default:** `bolt` + +| influxd flag | Environment variable | Configuration key | +| :--------------- | :--------------------- | :---------------- | +| `--secret-store` | `INFLUXD_SECRET_STORE` | `secret-store` | + +###### influxd flag +```sh +influxd --secret-store=bolt +``` + +###### Environment variable +```sh +export INFLUXD_SECRET_STORE=bolt +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +secret-store: bolt +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +secret-store = "bolt" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "secret-store": "bolt" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### session-length +Specifies the Time to Live (TTL) **in minutes** for newly created user sessions. 
+ +**Default:** `60` + +| influxd flag | Environment variable | Configuration key | +| :----------------- | :----------------------- | :---------------- | +| `--session-length` | `INFLUXD_SESSION_LENGTH` | `session-length` | + +###### influxd flag +```sh +influxd --session-length=60 +``` + +###### Environment variable +```sh +export INFLUXD_SESSION_LENGTH=60 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +session-length: 60 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +session-length = 60 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "session-length": 60 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### session-renew-disabled +Disables automatically extending a user's session TTL on each request. +By default, every request sets the session's expiration time to five minutes from now. +When disabled, sessions expire after the specified [session length](#session-length) +and the user is redirected to the login page, even if recently active. + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :------------------------- | :------------------------------- | :----------------------- | +| `--session-renew-disabled` | `INFLUXD_SESSION_RENEW_DISABLED` | `session-renew-disabled` | + +###### influxd flag +```sh +influxd --session-renew-disabled +``` + +###### Environment variable +```sh +export INFLUXD_SESSION_RENEW_DISABLED=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +session-renew-disabled: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +session-renew-disabled = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "session-renew-disabled": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### sqlite-path + +Path to the SQLite database file. +The SQLite database is used to store metadata for notebooks and annotations. + +**Default:** _`influxd.sqlite` in the same directory as the [bolt-path](#bolt-path)._ + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :-------------------- | :---------------- | +| `--sqlite-path` | `INFLUXD_SQLITE_PATH` | `sqlite-path` | + +###### influxd flag +```sh +influxd --sqlite-path ~/.influxdbv2/influxd.sqlite +``` + +###### Environment variable +```sh +export INFLUXD_SQLITE_PATH=~/.influxdbv2/influxd.sqlite +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +sqlite-path: ~/.influxdbv2/influxd.sqlite +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +sqlite-path = "~/.influxdbv2/influxd.sqlite" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "sqlite-path": "~/.influxdbv2/influxd.sqlite" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-cache-max-memory-size +Maximum size (in bytes) a shard's cache can reach before it starts rejecting writes. 
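+
+The value is set in bytes; the default of `1073741824` bytes is 1 GiB.
+A minimal sketch that doubles the limit (hypothetical sizing):
+
+```sh
+# 2 GiB = 2 * 1024 * 1024 * 1024 bytes
+influxd --storage-cache-max-memory-size=2147483648
+```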
+
+**Default:** `1073741824`
+
+| influxd flag | Environment variable | Configuration key |
+| :-------------------------------- | :-------------------------------------- | :------------------------------ |
+| `--storage-cache-max-memory-size` | `INFLUXD_STORAGE_CACHE_MAX_MEMORY_SIZE` | `storage-cache-max-memory-size` |
+
+###### influxd flag
+```sh
+influxd --storage-cache-max-memory-size=1073741824
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_CACHE_MAX_MEMORY_SIZE=1073741824
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-cache-max-memory-size: 1073741824
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-cache-max-memory-size = 1073741824
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-cache-max-memory-size": 1073741824
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### storage-cache-snapshot-memory-size
+Size (in bytes) at which the storage engine will snapshot the cache
+and write it to a TSM file to make more memory available.
+
+**Default:** `26214400`
+
+| influxd flag | Environment variable | Configuration key |
+| :------------------------------------- | :------------------------------------------- | :----------------------------------- |
+| `--storage-cache-snapshot-memory-size` | `INFLUXD_STORAGE_CACHE_SNAPSHOT_MEMORY_SIZE` | `storage-cache-snapshot-memory-size` |
+
+###### influxd flag
+```sh
+influxd --storage-cache-snapshot-memory-size=26214400
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_CACHE_SNAPSHOT_MEMORY_SIZE=26214400
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-cache-snapshot-memory-size: 26214400
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-cache-snapshot-memory-size = 26214400
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-cache-snapshot-memory-size": 26214400
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### storage-cache-snapshot-write-cold-duration
+Duration at which the storage engine will snapshot the cache and
+write it to a new TSM file if the shard hasn't received writes or deletes.
+ +**Default:** `10m0s` + +| influxd flag | Environment variable | Configuration key | +| :--------------------------------------------- | :--------------------------------------------------- | :------------------------------------------- | +| `--storage-cache-snapshot-write-cold-duration` | `INFLUXD_STORAGE_CACHE_SNAPSHOT_WRITE_COLD_DURATION` | `storage-cache-snapshot-write-cold-duration` | + +###### influxd flag +```sh +influxd --storage-cache-snapshot-write-cold-duration=10m0s +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_CACHE_SNAPSHOT_WRITE_COLD_DURATION=10m0s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-cache-snapshot-write-cold-duration: 10m0s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-cache-snapshot-write-cold-duration = "10m0s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-cache-snapshot-write-cold-duration": "10m0s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-compact-full-write-cold-duration +Duration at which the storage engine will compact all TSM files in a +shard if it hasn't received writes or deletes. + +**Default:** `4h0m0s` + +| influxd flag | Environment variable | Configuration key | +| :------------------------------------------- | :------------------------------------------------- | :----------------------------------------- | +| `--storage-compact-full-write-cold-duration` | `INFLUXD_STORAGE_COMPACT_FULL_WRITE_COLD_DURATION` | `storage-compact-full-write-cold-duration` | + +###### influxd flag +```sh +influxd --storage-compact-full-write-cold-duration=4h0m0s +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_COMPACT_FULL_WRITE_COLD_DURATION=4h0m0s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-compact-full-write-cold-duration: 4h0m0s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-compact-full-write-cold-duration = "4h0m0s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-compact-full-write-cold-duration": "4h0m0s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-compact-throughput-burst +Rate limit (in bytes per second) that TSM compactions can write to disk. 
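+
+The value is set in bytes per second; the default of `50331648` is 48 MiB per second.
+A minimal sketch that raises the limit (hypothetical sizing):
+
+```sh
+# 64 MiB per second = 64 * 1024 * 1024 bytes
+influxd --storage-compact-throughput-burst=67108864
+```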
+ +**Default:** `50331648` + +| influxd flag | Environment variable | Configuration key | +| :----------------------------------- | :----------------------------------------- | :--------------------------------- | +| `--storage-compact-throughput-burst` | `INFLUXD_STORAGE_COMPACT_THROUGHPUT_BURST` | `storage-compact-throughput-burst` | + +###### influxd flag +```sh +influxd --storage-compact-throughput-burst=50331648 +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_COMPACT_THROUGHPUT_BURST=50331648 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-compact-throughput-burst: 50331648 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-compact-throughput-burst = 50331648 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-compact-throughput-burst": 50331648 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-max-concurrent-compactions +Maximum number of full and level compactions that can run concurrently. +A value of `0` results in 50% of `runtime.GOMAXPROCS(0)` used at runtime. +Any number greater than zero limits compactions to that value. +_This setting does not apply to cache snapshotting._ + +**Default:** `0` + +| influxd flag | Environment variable | Configuration key | +| :------------------------------------- | :------------------------------------------- | :----------------------------------- | +| `--storage-max-concurrent-compactions` | `INFLUXD_STORAGE_MAX_CONCURRENT_COMPACTIONS` | `storage-max-concurrent-compactions` | + +###### influxd flag +```sh +influxd --storage-max-concurrent-compactions=0 +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_MAX_CONCURRENT_COMPACTIONS=0 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-max-concurrent-compactions: 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-max-concurrent-compactions = 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-max-concurrent-compactions": 0 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-max-index-log-file-size +Size (in bytes) at which an index write-ahead log (WAL) file will compact into an index file. +Lower sizes will cause log files to be compacted more quickly and result in lower +heap usage at the expense of write throughput. 
+ +**Default:** `1048576` + +| influxd flag | Environment variable | Configuration key | +| :---------------------------------- | :---------------------------------------- | :-------------------------------- | +| `--storage-max-index-log-file-size` | `INFLUXD_STORAGE_MAX_INDEX_LOG_FILE_SIZE` | `storage-max-index-log-file-size` | + +###### influxd flag +```sh +influxd --storage-max-index-log-file-size=1048576 +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_MAX_INDEX_LOG_FILE_SIZE=1048576 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-max-index-log-file-size: 1048576 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-max-index-log-file-size = 1048576 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-max-index-log-file-size": 1048576 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-no-validate-field-size +Skip field size validation on incoming write requests. + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :--------------------------------- | :--------------------------------------- | :------------------------------- | +| `--storage-no-validate-field-size` | `INFLUXD_STORAGE_NO_VALIDATE_FIELD_SIZE` | `storage-no-validate-field-size` | + +###### influxd flag +```sh +influxd --storage-no-validate-field-size +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_NO_VALIDATE_FIELD_SIZE=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-no-validate-field-size: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-no-validate-field-size = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-no-validate-field-size": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-retention-check-interval +Interval of retention policy enforcement checks. + +**Default:** `30m0s` + +| influxd flag | Environment variable | Configuration key | +| :----------------------------------- | :----------------------------------------- | :--------------------------------- | +| `--storage-retention-check-interval` | `INFLUXD_STORAGE_RETENTION_CHECK_INTERVAL` | `storage-retention-check-interval` | + +###### influxd flag +```sh +influxd --storage-retention-check-interval=30m0s +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_RETENTION_CHECK_INTERVAL=30m0s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-retention-check-interval: 30m0s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-retention-check-interval = "30m0s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-retention-check-interval": "30m0s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-series-file-max-concurrent-snapshot-compactions +Maximum number of snapshot compactions that can run concurrently across +all series partitions in a database. 
+ +**Default:** `0` + +| influxd flag | Environment variable | Configuration key | +| :---------------------------------------------------------- | :---------------------------------------------------------------- | :-------------------------------------------------------- | +| `--storage-series-file-max-concurrent-snapshot-compactions` | `INFLUXD_STORAGE_SERIES_FILE_MAX_CONCURRENT_SNAPSHOT_COMPACTIONS` | `storage-series-file-max-concurrent-snapshot-compactions` | + +###### influxd flag +```sh +influxd --storage-series-file-max-concurrent-snapshot-compactions=0 +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_SERIES_FILE_MAX_CONCURRENT_SNAPSHOT_COMPACTIONS=0 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-series-file-max-concurrent-snapshot-compactions: 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-series-file-max-concurrent-snapshot-compactions = 0 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-series-file-max-concurrent-snapshot-compactions": 0 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-series-id-set-cache-size +Size of the internal cache used in the TSI index to store +previously calculated series results. +Cached results are returned quickly rather than needing to be recalculated when +a subsequent query with the same tag key/value predicate is executed. +Setting this value to `0` will disable the cache and may decrease query performance. + +**Default:** `100` + +{{% note %}} +This value should only be increased if the set of regularly used tag key/value +predicates across all measurements for a database is larger than 100. +An increase in cache size may lead to an increase in heap usage. +{{% /note %}} + +| influxd flag | Environment variable | Configuration key | +| :----------------------------------- | :----------------------------------------- | :--------------------------------- | +| `--storage-series-id-set-cache-size` | `INFLUXD_STORAGE_SERIES_ID_SET_CACHE_SIZE` | `storage-series-id-set-cache-size` | + +###### influxd flag +```sh +influxd --storage-series-id-set-cache-size=100 +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_SERIES_ID_SET_CACHE_SIZE=100 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-series-id-set-cache-size: 100 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-series-id-set-cache-size = 100 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-series-id-set-cache-size": 100 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-shard-precreator-advance-period +The time before a shard group's end-time that the successor shard group is created. 
+ +**Default:** `30m0s` + +| influxd flag | Environment variable | Configuration key | +| :------------------------------------------ | :------------------------------------------------ | :---------------------------------------- | +| `--storage-shard-precreator-advance-period` | `INFLUXD_STORAGE_SHARD_PRECREATOR_ADVANCE_PERIOD` | `storage-shard-precreator-advance-period` | + +###### influxd flag +```sh +influxd --storage-shard-precreator-advance-period=30m0s +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_SHARD_PRECREATOR_ADVANCE_PERIOD=30m0s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-shard-precreator-advance-period: 30m0s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-shard-precreator-advance-period = "30m0s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-shard-precreator-advance-period": "30m0s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-shard-precreator-check-interval +Interval of pre-create new shards check. + +**Default:** `10m0s` + +| influxd flag | Environment variable | Configuration key | +| :------------------------------------------ | :------------------------------------------------ | :---------------------------------------- | +| `--storage-shard-precreator-check-interval` | `INFLUXD_STORAGE_SHARD_PRECREATOR_CHECK_INTERVAL` | `storage-shard-precreator-check-interval` | + +###### influxd flag +```sh +influxd --storage-shard-precreator-check-interval=10m0s +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_SHARD_PRECREATOR_CHECK_INTERVAL=10m0s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-shard-precreator-check-interval: 10m0s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-shard-precreator-check-interval = "10m0s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-shard-precreator-check-interval": "10m0s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-tsm-use-madv-willneed +Inform the kernel that InfluxDB intends to page in mmap'd sections of TSM files. + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :-------------------------------- | :-------------------------------------- | :------------------------------ | +| `--storage-tsm-use-madv-willneed` | `INFLUXD_STORAGE_TSM_USE_MADV_WILLNEED` | `storage-tsm-use-madv-willneed` | + +###### influxd flag +```sh +influxd --storage-tsm-use-madv-willneed +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_TSM_USE_MADV_WILLNEED=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-tsm-use-madv-willneed: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-tsm-use-madv-willneed = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-tsm-use-madv-willneed": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### storage-validate-keys +Validate incoming writes to ensure keys have only valid unicode characters. 
+
+**Default:** `false`
+
+| influxd flag | Environment variable | Configuration key |
+| :------------------------ | :------------------------------ | :---------------------- |
+| `--storage-validate-keys` | `INFLUXD_STORAGE_VALIDATE_KEYS` | `storage-validate-keys` |
+
+###### influxd flag
+```sh
+influxd --storage-validate-keys
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_VALIDATE_KEYS=true
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-validate-keys: true
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-validate-keys = true
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-validate-keys": true
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### storage-wal-fsync-delay
+Duration a write will wait before fsyncing.
+A duration greater than `0` batches multiple fsync calls.
+This is useful for slower disks or when WAL write contention is present.
+
+**Default:** `0s`
+
+| influxd flag | Environment variable | Configuration key |
+| :-------------------------- | :-------------------------------- | :------------------------ |
+| `--storage-wal-fsync-delay` | `INFLUXD_STORAGE_WAL_FSYNC_DELAY` | `storage-wal-fsync-delay` |
+
+###### influxd flag
+```sh
+influxd --storage-wal-fsync-delay=0s
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_WAL_FSYNC_DELAY=0s
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-wal-fsync-delay: 0s
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-wal-fsync-delay = "0s"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-wal-fsync-delay": "0s"
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### storage-wal-max-concurrent-writes
+Maximum number of writes to the WAL directory to attempt at the same time.
+
+**Default:** `0` _(number of processing units available × 2)_
+
+| influxd flag | Environment variable | Configuration key |
+| :------------------------------------ | :------------------------------------------ | :---------------------------------- |
+| `--storage-wal-max-concurrent-writes` | `INFLUXD_STORAGE_WAL_MAX_CONCURRENT_WRITES` | `storage-wal-max-concurrent-writes` |
+
+###### influxd flag
+```sh
+influxd --storage-wal-max-concurrent-writes=0
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_WAL_MAX_CONCURRENT_WRITES=0
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-wal-max-concurrent-writes: 0
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-wal-max-concurrent-writes = 0
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-wal-max-concurrent-writes": 0
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### storage-wal-max-write-delay
+Maximum amount of time a write request to the WAL directory will wait when the
+[maximum number of concurrent active writes to the WAL directory](#storage-wal-max-concurrent-writes)
+has been met. Set to `0` to disable the timeout.
+
+**Default:** `10m`
+
+| influxd flag | Environment variable | Configuration key |
+| :------------------------------ | :------------------------------------ | :---------------------------- |
+| `--storage-wal-max-write-delay` | `INFLUXD_STORAGE_WAL_MAX_WRITE_DELAY` | `storage-wal-max-write-delay` |
+
+###### influxd flag
+```sh
+influxd --storage-wal-max-write-delay=10m
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_WAL_MAX_WRITE_DELAY=10m
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-wal-max-write-delay: 10m
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-wal-max-write-delay = "10m"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-wal-max-write-delay": "10m"
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### storage-write-timeout
+Maximum amount of time the storage engine will process a write request before timing out.
+
+**Default:** `10s`
+
+| influxd flag | Environment variable | Configuration key |
+| :------------------------ | :------------------------------ | :---------------------- |
+| `--storage-write-timeout` | `INFLUXD_STORAGE_WRITE_TIMEOUT` | `storage-write-timeout` |
+
+###### influxd flag
+```sh
+influxd --storage-write-timeout=10s
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORAGE_WRITE_TIMEOUT=10s
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+storage-write-timeout: 10s
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+storage-write-timeout = "10s"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "storage-write-timeout": "10s"
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### store
+Specifies the data store for REST resources.
+
+**Options:** `disk`, `memory`
+**Default:** `disk`
+
+{{% note %}}
+For backwards compatibility, this flag also accepts `bolt` as a value.
+When using `disk`, REST resources are stored on disk using the [bolt-path](#bolt-path) and [sqlite-path](#sqlite-path).
+{{% /note %}}
+
+{{% note %}}
+`memory` is meant for transient environments, such as testing environments, where
+data persistence does not matter.
+InfluxData does not recommend using `memory` in production.
+{{% /note %}}
+
+| influxd flag | Environment variable | Configuration key |
+| :----------- | :------------------- | :---------------- |
+| `--store` | `INFLUXD_STORE` | `store` |
+
+###### influxd flag
+```sh
+influxd --store=bolt
+```
+
+###### Environment variable
+```sh
+export INFLUXD_STORE=bolt
+```
+
+###### Configuration file
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[YAML](#)
+[TOML](#)
+[JSON](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```yml
+store: bolt
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```toml
+store = "bolt"
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```json
+{
+  "store": "bolt"
+}
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+---
+
+### testing-always-allow-setup
+Ensures the `/api/v2/setup` endpoint always returns `true` to allow onboarding.
+This configuration option is primarily used in continuous integration tests.
+ +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :----------------------------- | :----------------------------------- | :--------------------------- | +| `--testing-always-allow-setup` | `INFLUXD_TESTING_ALWAYS_ALLOW_SETUP` | `testing-always-allow-setup` | + +###### influxd flag +```sh +influxd --testing-always-allow-setup +``` + +###### Environment variable +```sh +export INFLUXD_TESTING_ALWAYS_ALLOW_SETUP=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +testing-always-allow-setup: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +testing-always-allow-setup = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "testing-always-allow-setup": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### tls-cert +Path to TLS certificate file. +Requires the [`tls-key`](#tls-key) to be set. + +_For more information, see [Enable TLS encryption](/influxdb/v2.5/security/enable-tls/)._ + +| influxd flag | Environment variable | Configuration key | +| :----------- | :------------------- | :---------------- | +| `--tls-cert` | `INFLUXD_TLS_CERT` | `tls-cert` | + +###### influxd flag +```sh +influxd --tls-cert=/path/to/influxdb.crt +``` + +###### Environment variable +```sh +export INFLUXD_TLS_CERT=/path/to/influxdb.crt +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +tls-cert: /path/to/influxdb.crt +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +tls-cert = "/path/to/influxdb.crt" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "tls-cert": "/path/to/influxdb.crt" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### tls-key +Path to TLS key file. +Requires the [`tls-cert`](#tls-cert) to be set. + +_For more information, see [Enable TLS encryption](/influxdb/v2.5/security/enable-tls/)._ + +| influxd flag | Environment variable | Configuration key | +| :----------- | :------------------- | :---------------- | +| `--tls-key` | `INFLUXD_TLS_KEY` | `tls-key` | + +###### influxd flag +```sh +influxd --tls-key=/path/to/influxdb.key +``` + +###### Environment variable +```sh +export INFLUXD_TLS_KEY=/path/to/influxdb.key +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +tls-key: /path/to/influxdb.key +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +tls-key = "/path/to/influxdb.key" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "tls-key": "/path/to/influxdb.key" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### tls-min-version +Minimum accepted TLS version. 
+ +**Default:** `1.2` + +| influxd flag | Environment variable | Configuration key | +| :------------------ | :------------------------ | :---------------- | +| `--tls-min-version` | `INFLUXD_TLS_MIN_VERSION` | `tls-min-version` | + +###### influxd flag +```sh +influxd --tls-min-version=1.2 +``` + +###### Environment variable +```sh +export INFLUXD_TLS_MIN_VERSION=1.2 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +tls-min-version: "1.2" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +tls-min-version = "1.2" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "tls-min-version": "1.2" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### tls-strict-ciphers +Restrict accepted TLS ciphers to: + +- ECDHE_RSA_WITH_AES_256_GCM_SHA384 +- ECDHE_RSA_WITH_AES_256_CBC_SHA +- RSA_WITH_AES_256_GCM_SHA384 +- RSA_WITH_AES_256_CBC_SHA + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :--------------------- | :--------------------------- | :------------------- | +| `--tls-strict-ciphers` | `INFLUXD_TLS_STRICT_CIPHERS` | `tls-strict-ciphers` | + +###### influxd flag +```sh +influxd --tls-strict-ciphers +``` + +###### Environment variable +```sh +export INFLUXD_TLS_STRICT_CIPHERS=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +tls-strict-ciphers: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +tls-strict-ciphers = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "tls-strict-ciphers": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### tracing-type +Enable tracing in InfluxDB and specifies the tracing type. +Tracing is disabled by default. + +**Options:** `log`, `jaeger` + +| influxd flag | Environment variable | Configuration key | +| :--------------- | :--------------------- | :---------------- | +| `--tracing-type` | `INFLUXD_TRACING_TYPE` | `tracing-type` | + +###### influxd flag +```sh +influxd --tracing-type=log +``` + +###### Environment variable +```sh +export INFLUXD_TRACING_TYPE=log +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +tracing-type: log +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +tracing-type = "log" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "tracing-type": "log" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### ui-disabled +Disable the InfluxDB user interface (UI). +The UI is enabled by default. 
+ +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :-------------------- | :---------------- | +| `--ui-disabled` | `INFLUXD_UI_DISABLED` | `ui-disabled` | + +###### influxd flag +```sh +influxd --ui-disabled +``` + +###### Environment variable +```sh +export INFLUXD_UI_DISABLED=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +ui-disabled: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +ui-disabled = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "ui-disabled": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-addr +Specifies the address of the Vault server expressed as a URL and port. +For example: `https://127.0.0.1:8200/`. + +| influxd flag | Environment variable | Configuration key | +| :------------- | :------------------- | :---------------- | +| `--vault-addr` | `VAULT_ADDR` | `vault-addr` | + +###### influxd flag +```sh +influxd --vault-addr=https://127.0.0.1:8200/ +``` + +###### Environment variable +```sh +export VAULT_ADDR=https://127.0.0.1:8200/ +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-addr: https://127.0.0.1:8200/ +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-addr = "https://127.0.0.1:8200/" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-addr": "https://127.0.0.1:8200/" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-cacert +Specifies the path to a PEM-encoded CA certificate file on the local disk. +This file is used to verify the Vault server's SSL certificate. +**This setting takes precedence over the [`--vault-capath`](#vault-capath) setting.** + +| influxd flag | Environment variable | Configuration key | +| :--------------- | :------------------- | :---------------- | +| `--vault-cacert` | `VAULT_CACERT` | `vault-cacert` | + +###### influxd flag +```sh +influxd --vault-cacert=/path/to/ca.pem +``` + +###### Environment variable +```sh +export VAULT_CACERT=/path/to/ca.pem +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-cacert: /path/to/ca.pem +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-cacert = "/path/to/ca.pem" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-cacert": "/path/to/ca.pem" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-capath +Specifies the path to a directory of PEM-encoded CA certificate files on the local disk. +These certificates are used to verify the Vault server's SSL certificate. 
+ +| influxd flag | Environment variable | Configuration key | +| :--------------- | :------------------- | :---------------- | +| `--vault-capath` | `VAULT_CAPATH` | `vault-capath` | + +###### influxd flag +```sh +influxd --vault-capath=/path/to/certs/ +``` + +###### Environment variable +```sh +export VAULT_CAPATH=/path/to/certs/ +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-capath: /path/to/certs/ +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-capath = "/path/to/certs/" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-capath": "/path/to/certs/" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-client-cert +Specifies the path to a PEM-encoded client certificate on the local disk. +This file is used for TLS communication with the Vault server. + +| influxd flag | Environment variable | Configuration key | +| :-------------------- | :------------------- | :------------------ | +| `--vault-client-cert` | `VAULT_CLIENT_CERT` | `vault-client-cert` | + +###### influxd flag +```sh +influxd --vault-client-cert=/path/to/client_cert.pem +``` + +###### Environment variable +```sh +export VAULT_CLIENT_CERT=/path/to/client_cert.pem +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-client-cert: /path/to/client_cert.pem +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-client-cert = "/path/to/client_cert.pem" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-client-cert": "/path/to/client_cert.pem" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-client-key +Specifies the path to an unencrypted, PEM-encoded private key on disk which +corresponds to the matching client certificate. + +| influxd flag | Environment variable | Configuration key | +| :------------------- | :------------------- | :----------------- | +| `--vault-client-key` | `VAULT_CLIENT_KEY` | `vault-client-key` | + +###### influxd flag +```sh +influxd --vault-client-key=/path/to/private_key.pem +``` + +###### Environment variable +```sh +export VAULT_CLIENT_KEY=/path/to/private_key.pem +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-client-key: /path/to/private_key.pem +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-client-key = "/path/to/private_key.pem" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-client-key": "/path/to/private_key.pem" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-max-retries +Specifies the maximum number of retries when encountering a 5xx error code. +The default is 2 (for three attempts in total). Set this to 0 or less to disable retrying. 
+ +**Default:** `2` + +| influxd flag | Environment variable | Configuration key | +| :-------------------- | :------------------- | :------------------ | +| `--vault-max-retries` | `VAULT_MAX_RETRIES` | `vault-max-retries` | + +###### influxd flag +```sh +influxd --vault-max-retries=2 +``` + +###### Environment variable +```sh +export VAULT_MAX_RETRIES=2 +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-max-retries: 2 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-max-retries = 2 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-max-retries": 2 +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-client-timeout +Specifies the Vault client timeout. + +**Default:** `60s` + +| influxd flag | Environment variable | Configuration key | +| :----------------------- | :--------------------- | :--------------------- | +| `--vault-client-timeout` | `VAULT_CLIENT_TIMEOUT` | `vault-client-timeout` | + +###### influxd flag +```sh +influxd --vault-client-timeout=60s +``` + +###### Environment variable +```sh +export VAULT_CLIENT_TIMEOUT=60s +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-client-timeout: 60s +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-client-timeout = "60s" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-client-timeout": "60s" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-skip-verify +Skip certificate verification when communicating with Vault. +_Setting this variable voids [Vault's security model](https://www.vaultproject.io/docs/internals/security.html) +and is **not recommended**._ + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :-------------------- | :------------------- | :------------------ | +| `--vault-skip-verify` | `VAULT_SKIP_VERIFY` | `vault-skip-verify` | + +###### influxd flag +```sh +influxd --vault-skip-verify +``` + +###### Environment variable +```sh +export VAULT_SKIP_VERIFY=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-skip-verify: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-skip-verify = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-skip-verify": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-tls-server-name +Specifies the name to use as the Server Name Indication (SNI) host when connecting via TLS. 
+ +| influxd flag | Environment variable | Configuration key | +| :------------------------ | :---------------------- | :---------------------- | +| `--vault-tls-server-name` | `VAULT_TLS_SERVER_NAME` | `vault-tls-server-name` | + +###### influxd flag +```sh +influxd --vault-tls-server-name=secure.example.com +``` + +###### Environment variable +```sh +export VAULT_TLS_SERVER_NAME=secure.example.com +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-tls-server-name: secure.example.com +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-tls-server-name = "secure.example.com" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-tls-server-name": "secure.example.com" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +### vault-token +Specifies the Vault token use when authenticating with Vault. + +| influxd flag | Environment variable | Configuration key | +| :-------------- | :------------------- | :---------------- | +| `--vault-token` | `VAULT_TOKEN` | `vault-token` | + +###### influxd flag +```sh +influxd --vault-token=exAmple-t0ken-958a-f490-c7fd0eda5e9e +``` + +###### Environment variable +```sh +export VAULT_TOKEN=exAmple-t0ken-958a-f490-c7fd0eda5e9e +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +vault-token: exAmple-t0ken-958a-f490-c7fd0eda5e9e +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +vault-token = "exAmple-t0ken-958a-f490-c7fd0eda5e9e" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "vault-token": "exAmple-t0ken-958a-f490-c7fd0eda5e9e" +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} diff --git a/content/influxdb/v2.5/reference/contributing/_index.md b/content/influxdb/v2.5/reference/contributing/_index.md new file mode 100644 index 000000000..89657c7a6 --- /dev/null +++ b/content/influxdb/v2.5/reference/contributing/_index.md @@ -0,0 +1,26 @@ +--- +title: Contribute to InfluxDB OSS +description: Find important information about what's included in new versions of InfluxData products. +menu: + influxdb_2_5_ref: + name: Contribute to InfluxDB + weight: 11 +--- + +To contribute to the InfluxDB OSS project, complete the following steps: + +1. [Sign the InfluxData Contributor License Agreement (CLA)](#sign-influxdata-contributor-license-agreement-cla). +2. [Review contribution guidelines](#review-contribution-guidelines). +3. [Review the InfluxDB open source license](#review-open-source-license). + +## Sign InfluxData Contributor License Agreement (CLA) + +Before contributing to the InfluxDB OSS project, you must complete and sign the [InfluxData Contributor License Agreement (CLA)](https://www.influxdata.com/legal/cla/), available on the InfluxData website. + +## Review contribution guidelines + +To learn how you can contribute to the InfluxDB OSS project, see our [Contributing guidelines](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) in the GitHub repository. + +## Review open source license + +See information about our [open source MIT license for InfluxDB](https://github.com/influxdata/influxdb/blob/master/LICENSE) in GitHub. 
diff --git a/content/influxdb/v2.5/reference/faq.md b/content/influxdb/v2.5/reference/faq.md
new file mode 100644
index 000000000..9248d7ea9
--- /dev/null
+++ b/content/influxdb/v2.5/reference/faq.md
@@ -0,0 +1,1034 @@
+---
+title: Frequently asked questions
+description: Find answers to common questions related to InfluxDB OSS.
+menu:
+  influxdb_2_5_ref:
+    name: Frequently asked questions
+weight: 9
+---
+
+##### Account management {href="account-management-1"}
+- [How do I reset my password?](#how-do-i-reset-my-password)
+- {{% cloud-only %}}[How do I switch between InfluxDB Cloud accounts?](#how-do-i-switch-between-influxdb-cloud-accounts){{% /cloud-only %}}
+
+{{% cloud-only %}}
+
+##### Billing and usage {href="billing-and-usage-1"}
+- [How do I manage payment methods?](#how-do-i-manage-payment-methods)
+- [Who do I contact for billing issues?](#who-do-i-contact-for-billing-issues)
+- [How do I view my data usage?](#how-do-i-view-my-data-usage)
+- [How do I increase my organization's rate limits and quotas?](#how-do-i-increase-my-organizations-rate-limits-and-quotas)
+
+{{% /cloud-only %}}
+
+{{% cloud-only %}}
+
+##### InfluxDB Cloud service health {href="influxdb-cloud-service-health-1"}
+- [Where can I see the current status of InfluxDB Cloud?](#where-can-i-see-the-current-status-of-influxdb-cloud)
+
+{{% /cloud-only %}}
+
+{{% oss-only %}}
+
+##### InfluxDB service health {href="influxdb-service-health-1"}
+- [Where can I see the current status of my InfluxDB instance?](#where-can-i-see-the-current-status-of-my-influxdb-instance)
+
+{{% /oss-only %}}
+
+##### Security {href="security-1"}
+- [What different types of API tokens exist?](#what-different-types-of-api-tokens-exist)
+- [Can I use InfluxDB with authentication disabled?](#can-i-use-influxdb-with-authentication-disabled)
+- {{% cloud-only %}}[Can you change the permission level of members in your organization?](#can-you-change-the-permission-level-of-members-in-your-organization){{% /cloud-only %}}
+
+##### Administration {href="administration-1"}
+- {{% oss-only %}}[How can I identify my InfluxDB version?](#how-can-i-identify-my-influxdb-version){{% /oss-only %}}
+- [How can I identify the version of Flux I'm using in InfluxDB?](#how-can-i-identify-the-version-of-flux-im-using-in-influxdb)
+- {{% oss-only %}}[Where can I find InfluxDB logs?](#where-can-i-find-influxdb-logs){{% /oss-only %}}
+- {{% oss-only %}}[What is the relationship between shard group durations and retention periods?](#what-is-the-relationship-between-shard-group-durations-and-retention-periods){{% /oss-only %}}
+- [Why isn't data dropped after I update a bucket's retention period?](#why-isnt-data-dropped-after-i-update-a-buckets-retention-period)
+
+##### Data types {href="data-types-1"}
+- [What are the minimum and maximum integers that InfluxDB can store?](#what-are-the-minimum-and-maximum-integers-that-influxdb-can-store)
+- [What are the minimum and maximum timestamps that InfluxDB can store?](#what-are-the-minimum-and-maximum-timestamps-that-influxdb-can-store)
+- [Can I change a field's data type?](#can-i-change-a-fields-data-type)
+
+##### Writing data {href="writing-data"}
+- [How do I write integer and unsigned integer field values?](#how-do-i-write-integer-and-unsigned-integer-field-values)
+- [How does InfluxDB handle duplicate points?](#how-does-influxdb-handle-duplicate-points)
+- [What newline character does the InfluxDB write API require?](#what-newline-character-does-the-influxdb-write-api-require)
+- [When should I single 
quote and when should I double quote when writing data?](#when-should-i-single-quote-and-when-should-i-double-quote-when-writing-data)
+- [Does the precision of the timestamp matter?](#does-the-precision-of-the-timestamp-matter)
+- {{% oss-only %}}[What are the configuration recommendations and schema guidelines for writing sparse, historical data?](#what-are-the-configuration-recommendations-and-schema-guidelines-for-writing-sparse-historical-data){{% /oss-only %}}
+
+##### Querying data {href="querying-data-1"}
+- [Flux](#flux)
+  - [How do I structure fields as columns (like InfluxQL)?](#how-do-i-structure-fields-as-columns-like-influxql)
+  - [How can I derive a state from multiple field values?](#how-can-i-derive-a-state-from-multiple-field-values)
+- [InfluxQL](#influxql)
+  - {{% cloud-only %}}[How do I use InfluxQL with InfluxDB Cloud?](#how-do-i-use-influxql-with-influxdb-cloud){{% /cloud-only %}}
+  - {{% oss-only %}}[How do I use InfluxQL with InfluxDB v2.x?](#how-do-i-use-influxql-with-influxdb-v2x){{% /oss-only %}}
+  - [How do I perform mathematical operations in an InfluxQL function?](#how-do-i-perform-mathematical-operations-in-an-influxql-function)
+  - [Why does my query return epoch 0 as the timestamp?](#why-does-my-query-return-epoch-0-as-the-timestamp)
+  - [Which InfluxQL functions support nesting?](#which-influxql-functions-support-nesting)
+  - [What determines the time intervals returned by `GROUP BY time()` queries?](#what-determines-the-time-intervals-returned-by-group-by-time-queries)
+  - [Why do my queries return no data or partial data?](#why-do-my-queries-return-no-data-or-partial-data)
+  - [Why don't my `GROUP BY time()` queries return timestamps that occur after `now()`?](#why-dont-my-group-by-time-queries-return-timestamps-that-occur-after-now)
+  - [Can I perform mathematical operations against timestamps?](#can-i-perform-mathematical-operations-against-timestamps)
+  - [Can I identify write precision from returned timestamps?](#can-i-identify-write-precision-from-returned-timestamps)
+  - [When should I use single quote versus double quotes in a query?](#when-should-i-use-single-quote-versus-double-quotes-in-a-query)
+  - [Why is my query with a `WHERE OR` time clause returning empty results?](#why-is-my-query-with-a-where-or-time-clause-returning-empty-results)
+  - [Why does `fill(previous)` return empty results?](#why-does-fillprevious-return-empty-results)
+  - [How do I query data with an identical tag key and field key?](#how-do-i-query-data-with-an-identical-tag-key-and-field-key)
+  - [How do I query data across measurements?](#how-do-i-query-data-across-measurements)
+  - [Does the order of timestamps in a query matter?](#does-the-order-of-timestamps-in-a-query-matter)
+  - [How do I query data by a tag with a null value?](#how-do-i-query-data-by-a-tag-with-a-null-value)
+- {{% cloud-only %}}[Why am I getting the error, "total duration of queries in the last 30s exceeds limit of 25m0s"?](#why-am-i-getting-the-error-total-duration-of-queries-in-the-last-30s-exceeds-limit-of-25m0s){{% /cloud-only %}}
+
+##### Deleting data {href="deleting-data"}
+- [Can I delete a field?](#can-i-delete-a-field)
+- [Can I delete a measurement?](#can-i-delete-a-measurement)
+- [Can I delete multiple measurements at the same time?](#can-i-delete-multiple-measurements-at-the-same-time)
+- [Do I need to verify that data is deleted?](#do-i-need-to-verify-that-data-is-deleted)
+
+##### InfluxDB tasks {href="influxdb-tasks-1"}
+- [How does retrying a task affect relative time 
ranges?](#how-does-retrying-a-task-affect-relative-time-ranges)
+
+##### Series and series cardinality {href="series-and-series-cardinality-1"}
+- [What is series cardinality?](#what-is-series-cardinality)
+- [Why does series cardinality matter?](#why-does-series-cardinality-matter)
+- {{% oss-only %}}[How do I remove series from the index?](#how-do-i-remove-series-from-the-index){{% /oss-only %}}
+
+---
+
+## Account management
+
+#### How do I reset my password?
+
+{{% cloud-only %}}
+
+Use the **Forgot Password** link on the InfluxDB Cloud login page to update your
+password. For more information, see
+[Change your password](/influxdb/cloud/account-management/change-password/).
+
+{{% /cloud-only %}}
+
+{{% oss-only %}}
+
+Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) and the
+[`influx user password` command](/influxdb/v2.5/reference/cli/influx/user/password/)
+to update a user's password.
+For more information, see
+[Change your password](/influxdb/v2.5/users/change-password/).
+
+{{% /oss-only %}}
+
+{{% cloud-only %}}
+
+#### How do I switch between InfluxDB Cloud accounts?
+Use the **Switch Accounts** functionality in your InfluxDB Cloud account settings
+to switch between InfluxDB Cloud accounts.
+For more information, see [Switch InfluxDB Cloud accounts](/influxdb/cloud/account-management/switch-account/).
+
+---
+
+## Billing and usage
+
+#### How do I manage payment methods?
+- If you subscribed to InfluxDB Cloud through InfluxData, you can manage payment
+  methods in the [Billing section](https://cloud2.influxdata.com/me/billing) of
+  your InfluxDB Cloud account.
+- If you subscribed to InfluxDB Cloud through a cloud provider marketplace
+  (**AWS Marketplace**, **Azure Marketplace**, or **GCP Marketplace**),
+  use your cloud provider's billing administration to manage payment methods.
+
+For more information, see [Manage InfluxDB Cloud billing](/influxdb/cloud/account-management/billing/).
+
+#### Who do I contact for billing issues?
+For billing issues, please [contact InfluxData support](https://support.influxdata.com/s/contactsupport).
+
+#### How do I view my data usage?
+To view your InfluxDB Cloud organization's data usage, view the [Usage page](https://cloud2.influxdata.com/me/usage)
+in the InfluxDB Cloud user interface. For more information, see
+[View InfluxDB Cloud data usage](/influxdb/cloud/account-management/data-usage/).
+
+#### How do I increase my organization's rate limits and quotas?
+- If using the InfluxDB Cloud [Free Plan](/influxdb/cloud/account-management/pricing-plans/#free-plan),
+  for increased rate limits and quotas, upgrade to a
+  [Usage-Based](/influxdb/cloud/account-management/pricing-plans/#usage-based-plan)
+  or [Annual Plan](/influxdb/cloud/account-management/pricing-plans/#annual-plan).
+- If using a **Usage-Based** or **Annual** Plan, [contact InfluxData support](https://support.influxdata.com/s/contactsupport)
+  and request rate limit and quota adjustments.
+
+{{% /cloud-only %}}
+
+---
+
+{{% cloud-only %}}
+
+## InfluxDB Cloud service health
+
+#### Where can I see the current status of InfluxDB Cloud?
+InfluxDB Cloud regions and underlying services are monitored at all times.
+To see the current status of InfluxDB Cloud, view the [InfluxDB Cloud status page](https://status.influxdata.com).
+To receive outage alerts and updates, subscribe to our status page.
+
+{{% /cloud-only %}}
+
+{{% oss-only %}}
+
+## InfluxDB service health
+
+#### Where can I see the current status of my InfluxDB instance?
+InfluxDB {{< current-version >}} provides different ways to monitor its status: + +- The [`/health` API endpoint](/influxdb/v2.5/api/#tag/Health) returns a JSON + body with a summary of the current status of your InfluxDB instance. + +{{% expand-wrapper %}} +{{% expand "View example health summary" %}} +``` +{ + "name": "influxdb", + "message": "ready for queries and writes", + "status": "pass", + "checks": [], + "version": "{{< latest-patch >}}", + "commit": "xx00x0x000" +} +``` +{{% /expand %}} +{{% /expand-wrapper %}} + +- The [`/metrics` API endpoint](/influxdb/v2.5/api/#tag/Metrics) provides internal + InfluxDB metrics in Prometheus exposition format. Use [Telegraf](/{{< latest "telegraf" >}}/), + [InfluxDB scrapers](/influxdb/v2.5/write-data/no-code/scrape-data/), or the Flux + [`prometheus.scrape()` function](/flux/v0.x/stdlib/experimental/prometheus/scrape/) + to scrape these metrics and store them in InfluxDB where you can monitor and + alert on any anomalies. + + You can also use the [InfluxDB Open Source (OSS) Metrics template](https://github.com/influxdata/community-templates/tree/master/influxdb2_oss_metrics) + quickly setup InfluxDB OSS monitoring. + + For more information, see [Monitor InfluxDB OSS using a template](/influxdb/v2.5/monitor-alert/templates/monitor/) + +{{% /oss-only %}} + +--- + +## Security + +#### What different types of API tokens exist? +InfluxDB {{< current-version >}} supports the following token types: + +- {{% oss-only %}}Operator tokens{{% /oss-only %}} +- All-Access tokens +- {{% cloud-only %}}Custom tokens{{% /cloud-only %}} +- {{% oss-only %}}Read/Write tokens{{% /oss-only %}} + +For more information about each token type, see [Manage API tokens](/influxdb/v2.5/security/tokens/). + +#### Can I use InfluxDB with authentication disabled? +InfluxDB {{< current-version >}} enforces security best practices by requiring +API requests to be authenticated. Authentication cannot be disabled. + +{{% cloud-only %}} + +#### Can you change the permission level of members in your organization? +InfluxDB Cloud has only one permission level for users: Owner. +With Owner permissions, a user can delete resources and other users from your organization. +Take care when inviting a user. + +{{% /cloud-only %}} + +--- + +## Administration + +{{% oss-only %}} + +#### How can I identify my InfluxDB version? + +Use one of the following methods to identify the version of InfluxDB OSS you're using: + +- **Use the InfluxDB UI**: + - On the user login page + - In the right column of the main landing page + +- **Use the `influxd version` command** + + ```bash + $ influxd version + + InfluxDB {{< latest-patch >}} (git: x0x000xx0x) build_date: YYYY-MM-DDThh:mm:ssZ + ``` + +- **Use the `/health` API endpoint**. + + The following example uses [`jq`](https://stedolan.github.io/jq/) to process the + JSON body returned from the `/health` API endpoint and extract the InfluxDB version. + You don't have to process the JSON with `jq`. For an example of the JSON + returned by the `/health` endpoint, see [View example health summary](#view-example-health-summary). + + ```bash + $ curl -s http://localhost:8086/health | jq -r '.version' + + {{< latest-patch >}} + ``` + +{{% /oss-only %}} + +#### How can I identify the version of Flux I'm using in InfluxDB? +For information about what versions of Flux are packaged with official InfluxDB +releases, see [Flux versions in InfluxDB](/flux/v0.x/influxdb-versions/). 
+
+If using a custom build, use the following query to return the current version
+of Flux being used:
+
+```js
+import "array"
+import "runtime"
+
+array.from(rows: [{version: runtime.version()}])
+```
+
+For more information, see [Query the Flux version](/influxdb/cloud/query-data/flux/flux-version/).
+
+{{% oss-only %}}
+
+#### Where can I find InfluxDB logs?
+All InfluxDB logs are output by the `influxd` service.
+To store logs to a file, pipe the output of `influxd` to a file. For example:
+
+```
+influxd 2>~/path/to/influxd-errors.log
+```
+
+#### What is the relationship between shard group durations and retention periods?
+InfluxDB buckets store data in shard groups.
+A single shard group covers a specific time interval.
+InfluxDB determines that time interval by using the retention period of the bucket.
+The table below outlines the default relationship between the bucket retention
+period and the time interval of a shard group:
+
+| Bucket retention period     | Default shard group duration |
+| :-------------------------- | ---------------------------: |
+| less than 2 days            | 1h                           |
+| between 2 days and 6 months | 1d                           |
+| greater than 6 months       | 7d                           |
+
+For more information, see [InfluxDB Shards and shard groups](/influxdb/v2.5/reference/internals/shards/).
+
+{{% /oss-only %}}
+
+#### Why isn't data dropped after I update a bucket's retention period?
+Below are reasons why data may not be dropped immediately after updating
+the retention period of a bucket:
+
+- **The retention enforcement service runs {{% cloud-only %}}hourly{{% /cloud-only %}}{{% oss-only %}}every 30 minutes (by default){{% /oss-only %}}**.
+  You may need to wait for the next retention enforcement cycle to run.
+
+- {{% oss-only %}}
+
+  **InfluxDB drops shard groups, not individual points**.
+  Shard groups cover a specific time interval assigned to the shard group on creation.
+  The retention service will only delete a shard group when the entire time
+  range covered by the shard group is beyond the bucket retention period.
+
+  If the bucket's new retention period is less than the old shard group duration
+  and InfluxDB is currently writing data to the old, longer shard group, the
+  retention service will not drop the old shard group until its assigned
+  interval is fully expired.
+
+  {{% /oss-only %}}
+
+For more information, see [Data retention](/influxdb/v2.5/reference/internals/data-retention/).
+
+
+---
+
+## Data types
+
+#### What are the minimum and maximum integers that InfluxDB can store?
+InfluxDB stores all integers as signed 64bit integers.
+
+**Minimum integer**: `-9223372036854775808`
+**Maximum integer**: `9223372036854775807`
+
+Values close to but within those limits may lead to unexpected behavior.
+Some query operations convert 64bit integers to 64bit float values
+which can cause overflow issues.
+
+#### What are the minimum and maximum timestamps that InfluxDB can store?
+InfluxDB uses 64bit integers to represent Unix nanosecond timestamps.
+
+**Minimum timestamp**: `-9223372036854775806` or `1677-09-21T00:12:43.145224194Z`
+**Maximum timestamp**: `9223372036854775806` or `2262-04-11T23:47:16.854775806Z`
+
+Timestamps outside that range return a parsing error.
+
+#### Can I change a field's data type?
+[Flux type-conversion functions](/flux/v0.x/function-types/#type-conversions) let
+you change a field's data type at query time.
+However, you cannot change the type of a field on disk.
+Below are some possible workarounds: + +- **Copy a field to a new field as a different type.** + The example below does the following: + + - Queries the `example-string-field`. + - Converts field values to booleans. + - Changes the field name to `example-boolean-field`. + - Writes the new field to the source bucket. + + ```javascript + from(bucket: "example-bucket") + |> range(start: -30d) + |> filter(fn: (r) => r._measurement == "exampled-measurement") + |> filter(fn: (r) => r._field == "example-string-field") + |> toBool() + |> set(key: "_field", value: "example-boolean-field") + |> to(bucket: "example-bucket") + ``` + +- **Copy a field to a new bucket as a different type.** + The example below does the following: + + - Queries the `example-int-field` from the `example-bucket-1` bucket. + - Converts field values to float values. + - Changes the field name to `example-float-field`. + - Writes the new field to the `example-bucket-2` bucket. + + ```javascript + from(bucket: "example-bucket-1") + |> range(start: -30d) + |> filter(fn: (r) => r._measurement == "exampled-measurement") + |> filter(fn: (r) => r._field == "example-int-field") + |> toFloat() + |> set(key: "_field", value: "example-float-field") + |> to(bucket: "example-bucket-2") + ``` + +--- + +## Writing data + +#### How do I write integer and unsigned integer field values? + +In line protocol, identify **integers** with a trailing `i` and **unsigned integers** +with a trailing `u`. Without these, numeric field values are parsed as floats. + +```sh +# Integer +value=100i + +# Unsigned integer +value=100u + +# Float +value=100 +``` + +#### How does InfluxDB handle duplicate points? + +InfluxDB uniquely identifies a point by its **measurement**, **tag set**, and **timestamp**. +If you submit a new point with the same measurement, tag set, and timestamp as +an existing point, InfluxDB unions the old field with the new field set, and +any ties go to the new field set. + +For more information, see [Handle duplicate data points](/influxdb/v2.5/write-data/best-practices/duplicate-points/). + +#### What newline character does the InfluxDB write API require? + +InfluxDB line protocol relies on line feed (`\n`, which is ASCII `0x0A`) to +indicate the end of one line and the beginning of a new line. +Files or data that use a newline character other than `\n` will result in errors +similar to `bad timestamp` or `unable to parse`. + +{{% note %}} +##### Windows newlines +Windows uses carriage return and line feed (`\r\n`) as the newline character which +will result in an error if you manually write line protocol on a Windows machine. +Strip out any carriage returns (`\r`) before submitting the line protocol to the +InfluxDB write API. +{{% /note %}} + +#### When should I single quote and when should I double quote when writing data? + +Line protocol quote usage guidelines are provided in the +[line protocol documentation](/influxdb/cloud/reference/syntax/line-protocol/#quotes). + +#### Does the precision of the timestamp matter? + +Yes. Timestamp precision affects ingest performance. +The more precise the timestamp, the longer it takes to write the point. +To maximize performance, use the coarsest possible timestamp precision when +writing data to InfluxDB. However, if too coarse, you risk writing points from +the same series with the same timestamp, which would be treated as +[duplicate points](/influxdb/v2.5/write-data/best-practices/duplicate-points/). 
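+
+For example, consider the following hypothetical points, written milliseconds apart
+but with second precision. They share the same measurement, tag set, and timestamp,
+so InfluxDB treats them as duplicate points and the second `temp` value overwrites
+the first:
+
+```sh
+# Written with precision=s; both points collide on the same second
+home,room=kitchen temp=21.1 1666000000
+home,room=kitchen temp=21.4 1666000000
+```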
+ +{{% oss-only %}} + +#### What are the configuration recommendations and schema guidelines for writing sparse, historical data? + +For sparse historical data, we recommend: + +- **Use a longer [shard group duration](/influxdb/v2.5/reference/internals/shards/#shard-group-duration) + on the bucket you're writing historical data to.** + Historical shard group durations can and should cover several years. + If your historical data spans many years, but your bucket's shard group duration + is 1 week, InfluxDB will create many shards, negatively affecting overall performance. + +- **Temporarily lower the + [`storage-cache-snapshot-write-cold-duration` configuration setting](/influxdb/v2.5/reference/config-options/#storage-cache-snapshot-write-cold-duration) + while ingesting historical data**. + The default setting (`10m`) can cause the system cache all of your data for every shard. + Temporarily lowering the `storage-cache-snapshot-write-cold-duration` setting + to `10s` while you write the historical data makes the process more efficient. + +{{% /oss-only %}} + +--- + +## Querying data + +### Flux + +#### How do I structure fields as columns (like InfluxQL)? +A `SELECT` statement in InfluxQL returns data with a column for each queried tag and field. +The Flux [`from()`](/flux/v0.x/stdlib/influxdata/influxdb/from/) function returns +data with a column for each tag as well as a `_field` column that contains the +field key. Each field is grouped into a different table. + +To structure each field as a column, use either [`pivot()`](/flux/v0.x/stdlib/universe/pivot/) +or [`schema.fieldsAsCols()`](/flux/v0.x/stdlib/influxdata/influxdb/schema/fieldsascols/). + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[pivot()](#) +[schema.fieldsAsCols](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +exampleData + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +import "influxdata/influxdb/schema" + +exampleData + |> schema.fieldsAsCols() +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +###### Example data returned by from() +| _measurement | sensor_id | location | _field | _time | _value | +| :----------- | :-------- | :-------- | :----- | :------------------- | -----: | +| machine | abc123 | station20 | temp | 2022-01-01T00:00:00Z | 150.1 | +| machine | abc123 | station20 | temp | 2022-01-01T00:00:10Z | 152.8 | +| machine | abc123 | station20 | temp | 2022-01-01T00:00:20Z | 153.3 | + +| _measurement | sensor_id | location | _field | _time | _value | +| :----------- | :-------- | :-------- | :----- | :------------------- | -----: | +| machine | abc123 | station20 | flow | 2022-01-01T00:00:00Z | 12.2 | +| machine | abc123 | station20 | flow | 2022-01-01T00:00:10Z | 14.9 | +| machine | abc123 | station20 | flow | 2022-01-01T00:00:20Z | 16.1 | + +###### Example pivoted data +| _measurement | sensor_id | location | _time | temp | flow | +| :----------- | :-------- | :-------- | :------------------- | ----: | ---: | +| machine | abc123 | station20 | 2022-01-01T00:00:00Z | 150.1 | 12.2 | +| machine | abc123 | station20 | 2022-01-01T00:00:10Z | 152.8 | 14.9 | +| machine | abc123 | station20 | 2022-01-01T00:00:20Z | 153.3 | 16.1 | + +#### How can I derive a state from multiple field values? +To compare multiple field values and derive a state: + +1. Query all fields necessary to derive a state. +1. Use `pivot()` or `schema.fieldsAsCols()` to pivot fields into columns. +2. 
Use `map()` to iterate over each input row and assign a new column value based
+   on values in the field columns.
+
+   The `fn` parameter of `map()` defines a
+   function that outputs a record for each input row. Use conditional
+   logic to assign a state.
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "example-measurement")
+    |> filter(fn: (r) => r._field == "field1" or r._field == "field2")
+    |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")
+    |> map(
+        fn: (r) =>
+            ({r with state:
+                if r.field1 > 90 and r.field2 < 10 then
+                    "critical"
+                else if r.field1 > 70 and r.field2 < 30 then
+                    "warning"
+                else if r.field1 > 40 and r.field2 < 60 then
+                    "info"
+                else
+                    "ok",
+            }),
+    )
+```
+
+### InfluxQL
+
+{{% cloud-only %}}
+
+#### How do I use InfluxQL with InfluxDB Cloud?
+
+{{% /cloud-only %}}
+
+{{% oss-only %}}
+
+#### How do I use InfluxQL with InfluxDB v2.x?
+
+{{% /oss-only %}}
+
+Using InfluxQL with InfluxDB {{< current-version >}} is made possible by the
+[1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/) which replicates
+the `/query` endpoint from InfluxDB 1.x. This allows all InfluxDB 1.x-compatible
+clients to work with InfluxDB {{< current-version >}}. However, InfluxQL relies
+on a database and retention policy data model that doesn't exist in InfluxDB
+{{< current-version >}} and has been replaced by [buckets](/influxdb/v2.5/reference/glossary/#bucket).
+
+InfluxDB {{< current-version >}} lets you map unique database and retention
+policy combinations used in InfluxQL to specific buckets using DBRP mappings.
+
+For detailed instructions on using InfluxQL with InfluxDB {{< current-version >}}
+and configuring DBRP mapping, see [Query with InfluxQL](/influxdb/v2.5/query-data/influxql/).
+
+#### How do I perform mathematical operations in an InfluxQL function?
+InfluxQL does not support mathematical operations within functions.
+Use a [subquery](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#subqueries) to perform
+the mathematical calculation.
+
+For example, InfluxQL does not support the following syntax:
+
+```sql
+SELECT MEAN("dogs" - "cats") from "pet_daycare"
+```
+
+Instead, use a subquery to get the same result:
+
+```sql
+SELECT MEAN("difference") FROM (SELECT "dogs" - "cats" AS "difference" FROM "pet_daycare")
+```
+
+#### Why does my query return epoch 0 as the timestamp?
+In InfluxQL, epoch 0 (`1970-01-01T00:00:00Z`) is often used as a null timestamp equivalent.
+If you request a query that has no timestamp to return, such as an aggregation
+function with an unbounded time range, InfluxDB returns epoch 0 as the timestamp.
+
+#### Which InfluxQL functions support nesting?
+
+The following InfluxQL functions support nesting:
+
+- [`COUNT()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#count) with [`DISTINCT()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#distinct)
+- [`CUMULATIVE_SUM()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#cumulative-sum)
+- [`DERIVATIVE()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#derivative)
+- [`DIFFERENCE()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#difference)
+- [`ELAPSED()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#elapsed)
+- [`MOVING_AVERAGE()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#moving-average)
+- [`NON_NEGATIVE_DERIVATIVE()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#non-negative-derivative)
+- [`HOLT_WINTERS()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#holt-winters) and [`HOLT_WINTERS_WITH_FIT()`](/{{< latest "influxdb" "v1" >}}/query_language/functions/#holt-winters)
+
+For information on how to use subqueries as substitutes for nested functions, see
+[InfluxQL data exploration](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#subqueries).
+
+#### What determines the time intervals returned by `GROUP BY time()` queries?
+The time intervals returned by `GROUP BY time()` queries conform to the InfluxDB
+database's preset time windows or to the user-specified
+[offset interval](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#advanced-group-by-time-syntax).
+
+###### Preset time windows
+For example, the following query calculates the average value of `sunflowers` between
+6:15pm and 7:45pm and groups those averages into one hour intervals:
+
+```sql
+SELECT mean("sunflowers")
+FROM "flower_orders"
+WHERE time >= '2016-08-29T18:15:00Z' AND time <= '2016-08-29T19:45:00Z' GROUP BY time(1h)
+```
+
+InfluxQL uses the duration specified in the `GROUP BY time()` clause to partition
+data based on time. Preset time window boundaries fall on the duration unit specified.
+
+For example:
+
+| GROUP BY time() duration | Resulting window boundaries                    |
+| :----------------------- | :--------------------------------------------- |
+| 1s                       | 00:00:00 - 00:00:01, 00:00:01 - 00:00:02, etc. |
+| 1m                       | 00:00:00 - 00:01:00, 00:01:00 - 00:02:00, etc. |
+| 5m                       | 00:00:00 - 00:05:00, 00:05:00 - 00:10:00, etc. |
+| 1h                       | 00:00:00 - 01:00:00, 01:00:00 - 02:00:00, etc. |
+
+Although window boundaries may fall outside of the queried time range, only
+points within the queried time range are used in the calculation for each window.
+
+###### Offset time windows
+As another example, the following query calculates the average value of
+`sunflowers` between 6:15pm and 7:45pm and groups those averages into one hour intervals.
+It offsets the InfluxDB database's preset time windows by `15` minutes.
+
+```sql
+SELECT mean("sunflowers")
+FROM "flower_orders"
+WHERE time >= '2016-08-29T18:15:00Z' AND time <= '2016-08-29T19:45:00Z' GROUP BY time(1h,15m)
+                                                                                        ---
+                                                                                         |
+                                                                                  offset interval
+```
+
+InfluxQL uses the duration and offset specified in the `GROUP BY time()` clause to partition
+data based on time. Time boundaries begin at the specified offset.
+
+For example:
+
+| GROUP BY time() duration and offset | Resulting window boundaries                    |
+| :---------------------------------- | :--------------------------------------------- |
+| 1m,30s                              | 00:00:30 - 00:01:30, 00:01:30 - 00:02:30, etc. |
+| 5m,15s                              | 00:00:15 - 00:05:15, 00:05:15 - 00:10:15, etc. 
| +| 1h,20m | 00:20:00 - 01:20:00, 01:20:00 - 02:20:00, etc. | + +#### Why do my queries return no data or partial data? + +The most common reasons why your query returns no data or partial data: + +- [Querying the wrong retention policy](#querying-the-wrong-retention-policy) (no data returned) +- [No field key in the SELECT clause](#no-field-key-in-the-select-clause) (no data returned) +- [SELECT query includes `GROUP BY time()`](#select-query-includes-group-by-time) (partial data before `now()` returned) +- [Tag and field key with the same name](#tag-and-field-key-with-the-same-name) + +##### Querying the wrong retention policy + +InfluxDB automatically queries data in a database’s default retention policy +(configured as part of a [DBRP mapping](/influxdb/v2.5/query-data/influxql/)). +If your data is associated another retention policy, you must specify the correct +retention policy to get results. + +##### No field key in the SELECT clause + +An InfluxQL query requires at least one **field key** in the `SELECT` clause. +If the `SELECT` clause includes only **tag keys**, the query returns an empty response. +For more information, see +[InfluxQL Data exploration](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#common-issues-with-the-select-statement). + +##### SELECT query includes `GROUP BY time()` + +If your `SELECT` query includes a [`GROUP BY time()` clause](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#group-by-time-intervals), +only data points between `1677-09-21 00:12:43.145224194` and +[`now()`](/{{< latest "influxdb" "v1" >}}/concepts/glossary/#now) are returned. +If any of your data points occur after `now()`, specify +[an alternative upper bound](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#time-syntax) +in your time interval. + +##### Tag and field key with the same name + +Avoid using the same name for a tag and field key. +If you inadvertently add the same name for a tag and field key, and then query +both together, the query results show the second key queried (tag or field) +appended with `_1`. To query a tag or field key appended with `_1`, +you **must drop** the appended `_1` **and include** the syntax `::tag` or `::field`. +For example: + +```sql +-- Query duplicate keys using the correct syntax +SELECT "leaves"::tag, "leaves"::field FROM db.rp."grape" + +name: grape +time leaves leaves_1 +---- -------- ---------- +1574128162128468000 species 6.00 +1574128238044155000 5.00 +``` + +#### Why don't my `GROUP BY time()` queries return timestamps that occur after `now()`? + +`SELECT` statements without a time range defined in the `WHERE` clause have a +default time range of `1677-09-21 00:12:43.145224194` to `2262-04-11T23:47:16.854775806Z` UTC. +For `SELECT` statements that don't specify a time range but have a +[`GROUP BY time()` clause](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#group-by-time-intervals), +the default time range is `1677-09-21 00:12:43.145224194` UTC to [`now()`](/influxdb/v2.5/reference/glossary/#now). + +To query data with timestamps that occur after `now()`, `SELECT` statements with +a `GROUP BY time()` clause must provide an alternative **upper** bound in the +[`WHERE` clause](/influxdb/v1.8/query_language/explore-data/#the-where-clause). 
+
+For example:
+
+```sql
+SELECT MEAN("boards") FROM "hillvalley"
+WHERE time >= '2022-01-01T00:00:00Z' AND time <= now() + 10d
+GROUP BY time(12m) fill(none)
+```
+
+Note that the `WHERE` clause must provide an alternative **upper** bound to
+override the default `now()` upper bound. The following query merely resets
+the lower bound to `now()` such that the query's time range is between
+`now()` and `now()`:
+
+```sql
+SELECT MEAN("boards") FROM "hillvalley"
+WHERE time >= now()
+GROUP BY time(12m) fill(none)
+```
+
+For more on time syntax in queries, see [InfluxQL data exploration](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#time-syntax).
+
+#### Can I perform mathematical operations against timestamps?
+
+InfluxQL does not support mathematical operators against timestamp values.
+Most time calculations must be carried out by the client receiving the query results.
+
+There is limited support for using InfluxQL functions against timestamp values.
+The [ELAPSED()](/{{< latest "influxdb" "v1" >}}/query_language/functions/#elapsed)
+function returns the difference between subsequent timestamps in a single field.
+
+#### Can I identify write precision from returned timestamps?
+
+InfluxDB stores all timestamps as nanosecond values, regardless of the write precision supplied.
+InfluxQL silently drops trailing zeros from returned timestamps, so the original
+write precision is not recoverable from query results.
+
+#### When should I use single quote versus double quotes in a query?
+
+Follow these general rules for quotes in InfluxQL queries:
+
+###### Single quotes
+- Use to quote literal string values, like tag values.
+- Do **not** use on identifiers like database names, retention policy names,
+  user names, measurement names, tag keys, and field keys.
+- Use on date-time strings.
+
+###### Double quotes
+- Use on identifiers that start with a digit, contain characters other than `[A-z,0-9,_]`,
+  or that are an [InfluxQL keyword](/{{< latest "influxdb" "v1" >}}/query_language/spec/#keywords).
+  We generally recommend using double quotes on all identifiers, even if they
+  don't meet these criteria.
+- Do **not** use on date-time strings.
+
+```sql
+-- Correct quote usage
+
+SELECT bikes_available FROM bikes WHERE station_id='9'
+
+SELECT "bikes_available" FROM "bikes" WHERE "station_id"='9'
+
+SELECT MIN("avgrq-sz") AS "min_avgrq-sz" FROM telegraf
+
+SELECT * from "cr@zy" where "p^e"='2'
+
+SELECT "water_level" FROM "h2o_feet" WHERE time > '2015-08-18T23:00:01.232000000Z' AND time < '2015-09-19'
+
+-- Incorrect quote usage
+
+SELECT 'bikes_available' FROM 'bikes' WHERE 'station_id'="9"
+
+SELECT * from cr@zy where p^e='2'
+
+SELECT "water_level" FROM "h2o_feet" WHERE time > "2015-08-18T23:00:01.232000000Z" AND time < "2015-09-19"
+```
+
+#### Why is my query with a `WHERE OR` time clause returning empty results?
+
+InfluxQL does not support using `OR` in the `WHERE` clause to specify multiple
+time ranges and returns an empty response if multiple are specified.
+For example, the following query will return an empty response:
+
+```sql
+SELECT * FROM "absolutismus"
+WHERE time = '2016-07-31T20:07:00Z' OR time = '2016-07-31T23:07:17Z'
+```
+
+#### Why does `fill(previous)` return empty results?
+
+`fill(previous)` doesn't fill a null value if there is no previous value inside
+the queried time range.
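+
+For example, in the following hypothetical query (measurement and field names are
+illustrative), if the first 12-minute window in the queried time range contains no
+data, there is no previous value inside the range to carry forward, so that window
+stays empty even with `fill(previous)`:
+
+```sql
+SELECT MAX("water_level") FROM "h2o_feet"
+WHERE time >= '2015-09-18T16:24:00Z' AND time <= '2015-09-18T16:54:00Z'
+GROUP BY time(12m) fill(previous)
+```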
+
+#### How do I query data with an identical tag key and field key?
+
+Use the `::` syntax to specify if the key is a field key or tag key. For example:
+
+```sql
+SELECT * FROM "candied" WHERE "almonds"::field > 51
+SELECT * FROM "candied" WHERE "almonds"::tag='true'
+```
+
+#### How do I query data across measurements?
+
+InfluxQL does not support querying multiple measurements.
+All data must be under a single measurement to query it together.
+To perform cross-measurement queries,
+[use Flux](/influxdb/v2.5/reference/syntax/flux/flux-vs-influxql/#math-across-measurements).
+
+#### Does the order of timestamps in a query matter?
+
+No, it doesn't. There is only a _negligible_ difference between the following queries:
+
+```sql
+SELECT ... FROM ... WHERE time > 'timestamp1' AND time < 'timestamp2'
+SELECT ... FROM ... WHERE time < 'timestamp2' AND time > 'timestamp1'
+```
+
+#### How do I query data by a tag with a null value?
+
+In your `WHERE` clause, specify an empty or null tag value with `''`. For example:
+
+```sql
+SELECT * FROM "vases" WHERE priceless=''
+```
+
+{{% cloud-only %}}
+
+#### Why am I getting the error, "total duration of queries in the last 30s exceeds limit of 25m0s"?
+
+This error indicates you are exceeding the [Total query time global limit](/influxdb/cloud/account-management/limits/#global-limits)
+for your organization.
+Potential causes include:
+
+- A single long-running query.
+- Running too many queries at once.
+- A combination of both.
+
+If you are encountering this error due to a single long-running query, the query
+and potentially your schema should be analyzed for optimization.
+The following resources may help:
+
+- [Optimize Flux queries](/influxdb/cloud/query-data/optimize-queries/)
+- [Schema design best practices](/influxdb/cloud/write-data/best-practices/schema-design/)
+
+If you are encountering this error due to the number of concurrent queries,
+try delaying or staggering queries so they don't all run at the same time.
+
+{{% /cloud-only %}}
+
+---
+
+## Deleting data
+
+#### Can I delete a field?
+
+{{% oss-only %}}
+
+No. InfluxDB {{< current-version >}} does not support deleting data by field.
+
+{{% /oss-only %}}
+
+{{% cloud-only %}}
+
+Yes. InfluxDB Cloud supports deleting data by field.
+Use the `_field` label in your [delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate/)
+to identify the field to delete.
+
+```js
+_field == "example-field"
+```
+
+{{% /cloud-only %}}
+
+#### Can I delete a measurement?
+
+Yes. InfluxDB {{< current-version >}} supports deleting data by measurement.
+Use the `_measurement` label in your [delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate/)
+to identify the measurement to delete.
+
+```js
+_measurement == "example-measurement"
+```
+
+#### Can I delete multiple measurements at the same time?
+
+No. InfluxDB {{< current-version >}} does not support deleting multiple measurements
+in a single delete request.
+To delete multiple measurements, [issue a delete request](/influxdb/v2.5/write-data/delete-data/)
+for each measurement.
+
+#### Do I need to verify that data is deleted?
+
+It is not necessary to verify delete operations once they have been submitted to the queue.
+The `/api/v2/delete` endpoint returns a 204 response when the delete request has been added to the queue.
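+
+For example, a request like the following (the host, organization, bucket, token,
+time range, and predicate values are placeholders) returns `204 No Content` as soon
+as the delete request is queued:
+
+```sh
+curl --request POST "http://localhost:8086/api/v2/delete?org=example-org&bucket=example-bucket" \
+  --header "Authorization: Token API_TOKEN" \
+  --header "Content-Type: application/json" \
+  --data '{
+    "start": "2022-01-01T00:00:00Z",
+    "stop": "2022-01-02T00:00:00Z",
+    "predicate": "_measurement=\"example-measurement\""
+  }'
+```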
+{{% cloud-only %}}Because the delete queue executes asynchronously, there isn't a way to accurately
+predict when the delete operation will be performed at the storage layer.{{% /cloud-only %}}
+
+If you wish to verify a delete has occurred, try to query the deleted data.
+If the query returns results, the data has not been fully deleted.
+
+---
+
+## InfluxDB tasks
+
+#### How does retrying a task affect relative time ranges?
+
+When you retry a task that uses relative time ranges, it queries the original
+time range of the task execution (run).
+Whenever a task executes, InfluxDB sets the [`now` option](/flux/v0.x/stdlib/universe/#options)
+in the task to the scheduled execution time of the task.
+When using [`range()`](/flux/v0.x/stdlib/universe/range/)
+or other functions that support relative duration values, these duration values
+are relative to [`now()`](/flux/v0.x/stdlib/universe/now/), which returns the
+value of the `now` option. Every task run has a unique `now` option based on
+the time the run was scheduled to execute.
+
+---
+
+## Series and series cardinality
+
+#### What is series cardinality?
+
+[Series cardinality](/influxdb/v2.5/reference/glossary/#series-cardinality) is
+the total number of unique
+{{% cloud-only %}}**measurement**, **tag set**, and **field key** combinations{{% /cloud-only %}}
+{{% oss-only %}}**measurement** and **tag set** combinations{{% /oss-only %}}
+(series) stored on disk and indexed in memory.
+
+#### Why does series cardinality matter?
+
+{{% oss-only %}}
+
+InfluxDB maintains an in-memory index of every [series](/influxdb/v2.5/reference/glossary/#series).
+As the number of unique series grows, so does the memory usage.
+High series cardinality can force the host operating system to kill the InfluxDB
+process with an out of memory (OOM) exception.
+
+{{% /oss-only %}}
+
+{{% cloud-only %}}
+
+InfluxDB maintains an in-memory index of every [series](/influxdb/v2.5/reference/glossary/#series).
+As the number of unique series grows, it can negatively affect query performance.
+Each InfluxDB Cloud organization has a series cardinality limit to prevent
+runaway cardinality. For information about adjusting cardinality limits, see
+[How do I increase my organization’s rate limits and quotas?](#how-do-i-increase-my-organizations-rate-limits-and-quotas).
+
+{{% /cloud-only %}}
+
+Use [`influxdb.cardinality()`](/flux/v0.x/stdlib/influxdata/influxdb/cardinality/) in Flux
+or [`SHOW SERIES CARDINALITY`](/{{< latest "influxdb" "v1" >}}/query_language/spec/#show-series-cardinality)
+in InfluxQL to measure the series cardinality in a bucket.
+See [Resolve high series cardinality](/influxdb/v2.5/write-data/best-practices/resolve-high-cardinality/)
+for information about reducing series cardinality.
+
+{{% oss-only %}}
+
+#### How do I remove series from the index?
+
+To remove a series from an index:
+
+1. Use the **`influx` CLI** or **InfluxDB {{< current-version >}} API** to delete points
+   associated with the series. See [Delete data](/influxdb/v2.5/write-data/delete-data/)
+   for more information.
+2. Use the [`influxd inspect build-tsi` tool](/influxdb/v2.5/reference/cli/influxd/inspect/build-tsi/)
+   to rebuild your index.
+ +{{% /oss-only %}} diff --git a/content/influxdb/v2.5/reference/glossary.md b/content/influxdb/v2.5/reference/glossary.md new file mode 100644 index 000000000..33105154d --- /dev/null +++ b/content/influxdb/v2.5/reference/glossary.md @@ -0,0 +1,1276 @@ +--- +title: Glossary +description: > + Terms related to InfluxData products and platforms. +weight: 9 +menu: + influxdb_2_5_ref: + name: Glossary +influxdb/v2.5/tags: [glossary] +--- + +[A](#a) | [B](#b) | [C](#c) | [D](#d) | [E](#e) | [F](#f) | [G](#g) | [H](#h) | [I](#i) | [J](#j) | [K](#k) | [L](#l) | [M](#m) | [N](#n) | [O](#o) | [P](#p) | [Q](#q) | [R](#r) | [S](#s) | [T](#t) | [U](#u) | [V](#v) | [W](#w) | X | Y | Z + +## A + +### abstract syntax tree (AST) + +Tree representation of source code that shows the structure, content, and rules of programming statements and discards additional syntax elements. +The tree is hierarchical, with elements of program statements broken down into their parts. + +For more information about AST design, see [Abstract Syntax Tree on Wikipedia](https://en.wikipedia.org/wiki/Abstract_syntax_tree). +### agent + +A background process started by (or on behalf of) a user that typically requires user input. + +Telegraf is an agent that requires user input (a configuration file) to gather metrics from declared input plugins and sends metrics to declared output plugins, based on the plugins enabled for a configuration. + +Related entries: [input plugin](#input-plugin), [output plugin](#output-plugin), [daemon](#daemon) + +### aggregator plugin + +Receives metrics from input plugins, creates aggregate metrics, and then passes aggregate metrics to configured output plugins. + +Related entries: [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin) + +### aggregate + +A function that returns an aggregated value across a set of points. +For a list of available aggregation functions, see [Flux aggregate functions](/{{< latest "flux" >}}/function-types#aggregates). + +Related entries: [function](#function), [selector](#selector), [transformation](#transformation) + +## B + +### bar graph + +A visual representation in the InfluxDB user interface used to compare variables (bars) and plot categorical data. +A bar graph has spaces between bars, can be sorted in any order, and bars in the graph typically have the same width. + +Related entries: [histogram](#histogram) + +### batch + +A collection of points in line protocol format, separated by newlines (`0x0A`). +Submitting a batch of points using a single HTTP request to the write endpoints drastically increases performance by reducing the HTTP overhead. +InfluxData typically recommends batch sizes of 5,000-10,000 points. +In some use cases, performance may improve with significantly smaller or larger batches. + +Related entries: [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/), [point](#point) + +### batch size + +The number of lines or individual data points in a line protocol batch. +The Telegraf agent sends metrics to output plugins in batches rather than individually. +Batch size controls the size of each write batch that Telegraf sends to the output plugins. + +Related entries: [output plugin](#output-plugin) + +### bin + +In a cumulative histogram, a bin includes all data points less than or equal to a specified upper bound. +In a normal histogram, a bin includes all data points between the upper and lower bounds. 
+ +### block + +In Flux, a block is a possibly empty sequence of statements within matching braces (`{ }`). +Two types of blocks exist in Flux: + +- Explicit blocks in the source code, for example: + + ``` + Block = "{" StatementList "} + StatementList = { Statement } + ``` + +- Implicit blocks, including: + + - Universe: Encompasses all Flux source text. + - Package: Each package includes a package block that contains Flux source text for the package. + - File: Each file has a file block containing Flux source text in the file. + - Function: Each function literal has a function block with Flux source text (even if not explicitly declared). + +Related entries: [implicit block](#implicit-block), [explicit block](#explicit-block) + +### boolean + +A data type with two possible values: true or false. +By convention, you can express `true` as the integer `1` and false as the integer `0` (zero). +In [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/), columns that contain +boolean values are annotated with the `boolean` datatype. + +### bucket + +A bucket is a named location where time series data is stored. +All buckets have a [retention period](#retention-period). +A bucket belongs to an organization. + + +### bucket schema + +In InfluxDB Cloud, an explicit bucket schema lets you strictly enforce the data that can be written into one or more measurements in a bucket by defining the column names, tags, fields, and data types allowed for each measurement. By default, buckets in InfluxDB {{< current-version >}} have an `implicit` schema that lets you write data without restrictions on columns, fields, or data types. + +Learn how to [manage bucket schemas](/influxdb/cloud/organizations/buckets/bucket-schema/) in InfluxDB Cloud. + +Related entries: [data type](#data-type), [field](#field), [measurement](#measurement) + +## C + +### check + +Checks are part of queries used in monitoring to read input data and assign a [status](#check-status) (`_level`) based on specified conditions. +For example: + +``` +monitor.check( + crit: (r) => r._value > 90.0, + warn: (r) => r._value > 80.0, + info: (r) => r._value > 60.0, + ok: (r) => r._value <= 20.0, + messageFn: (r) => "The current level is ${r._level}", +) +``` + +This check gives rows with a `_value` greater than 90.0 a crit `_level`; rows greater than 80.0 get a warn `_level`, and so on. + +Learn how to [create a check](/influxdb/v2.5/monitor-alert/checks/create/). + +Related entries: [check status](#check-status), [notification rule](#notification-rule), [notification endpoint](#notification-endpoint) + +### check status + +A [check](#check) gets one of the following statuses (`_level`): `crit`, `info`, `warn`, or `ok`. +Check statuses are written to a status measurement in the `_monitoring` bucket. + +Related entries: [check](#check), [notification rule](#notification-rule), [notification endpoint](#notification-endpoint) + +### CSV + +Comma-separated values (CSV) delimits text between commas to separate values. +A CSV file stores tabular data (numbers and text) in plain text. +Each line of the file is a data record. +Each record consists of one or more fields, separated by commas. +CSV file format is not fully standardized. + +InfluxData uses annotated CSV (comma-separated values) format to encode HTTP responses and results returned to the Flux csv.from() function. +For more detail, see [Annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/). 
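+
+As a loose illustration of the format (the annotation rows, measurement name, and
+values below are hypothetical), the following Flux sketch parses an annotated CSV
+string with `csv.from()`:
+
+```js
+import "csv"
+
+// Annotated CSV: #datatype, #group, and #default annotation rows,
+// followed by a header row and data rows
+csvData =
+    "#datatype,string,long,dateTime:RFC3339,double,string
+#group,false,false,false,false,true
+#default,_result,,,,
+,result,table,_time,_value,_measurement
+,,0,2022-01-01T00:00:00Z,12.1,home
+,,0,2022-01-01T01:00:00Z,11.8,home
+"
+
+// Parse the annotated CSV into a stream of tables
+csv.from(csv: csvData)
+```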
+
+
+### co-monitoring dashboard
+
+The prebuilt co-monitoring dashboard displays details of your instance based on metrics from Telegraf, allowing you to monitor overall performance.
+
+### collect
+
+Collect and write time series data to InfluxDB using line protocol, Telegraf or InfluxDB scrapers, the InfluxDB v2 API, the influx command line interface (CLI), the InfluxDB user interface (UI), and client libraries.
+
+### collection interval
+
+The default global interval for collecting data from each Telegraf input plugin.
+The collection interval can be overridden by each individual input plugin's configuration.
+
+Related entries: [input plugin](#input-plugin)
+
+### collection jitter
+
+Collection jitter prevents every input plugin from collecting metrics simultaneously, which can have a measurable effect on the system.
+For each collection interval, every Telegraf input plugin will sleep for a random time between zero and the collection jitter before collecting the metrics.
+
+Related entries: [collection interval](#collection-interval), [input plugin](#input-plugin)
+
+### column
+
+InfluxDB data is stored in tables within rows and columns.
+Columns store tag sets (indexed) and field sets.
+The only required column is _time_, which stores timestamps and is included in all InfluxDB tables.
+
+### comment
+
+Use comments with Flux statements to describe your functions.
+
+### common log format (CLF)
+
+A standardized text file format used by the InfluxDB web server to create log entries when generating server log files.
+
+### compaction
+
+Compressing time series data to optimize disk usage.
+
+### continuous query (CQ)
+
+Continuous queries are the predecessor to tasks in InfluxDB {{< current-version >}}.
+Continuous queries run automatically and periodically on a database.
+
+Related entries: [function](#function)
+
+## D
+
+### daemon
+
+A background process that runs without user input.
+
+### dashboard
+
+InfluxDB dashboards visualize time series data.
+Use dashboards to query and graph data.
+
+### dashboard variable
+
+Dashboard template variables define components of a cell query.
+Dashboard variables make it easier to interact with and explore your dashboard data.
+Use the InfluxDB user interface (UI) to add predefined template variables or customize your own template variables.
+
+### Data Explorer
+
+Use the Data Explorer in the InfluxDB user interface (UI) to view, add, or delete variables and functions manually or using the Script Editor.
+
+### data model
+
+A data model organizes elements of data and standardizes how they relate to one another and to properties of real-world entities.
+
+Flux uses a data model built from basic data types: tables, records, columns, and streams.
+
+### data service
+
+Stores time series data and handles writes and queries.
+
+### data source
+
+A source of data that InfluxDB collects or queries from.
+Examples include InfluxDB buckets, Prometheus, Postgres, MySQL, and InfluxDB clients.
+
+Related entries: [bucket](#bucket)
+
+### data type
+
+A data type is defined by the values it can take, the programming language used, or the operations that can be performed on it.
+
+InfluxDB supports the following data types:
+
+| Data type        | Alias/annotation   |
+| :--------------- | :----------------- |
+| string           |                    |
+| boolean          |                    |
+| float            | double             |
+| integer          | int, long          |
+| unsigned integer | uint, unsignedLong |
+| time             | dateTime           |
+
+For more information about different data types, see:
+- [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/)
+- [extended annotated CSV](/influxdb/cloud/reference/syntax/annotated-csv/extended/#datatype)
+- [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/#data-types-and-format)
+- [InfluxQL](/influxdb/v1.8/query_language/spec/#literals)
+- [Flux](/influxdb/v2.5/reference/flux/language/types/)
+- [InfluxDB](/influxdb/v2.5/reference/syntax/line-protocol/#data-types-and-format)
+
+### database
+
+In InfluxDB {{< current-version >}}, a database represents the InfluxDB instance as a whole.
+
+Related entries: [continuous query](#continuous-query-cq), [user](#user)
+
+### date-time
+
+InfluxDB stores the date-time of each data point as a timestamp with nanosecond-precision Unix time.
+Specifying a timestamp is optional.
+If a timestamp isn't specified for a data point, InfluxDB uses the server’s local nanosecond timestamp in UTC.
+
+### downsample
+
+Aggregating high resolution data into lower resolution data to preserve disk space.
+
+### duration
+
+A data type that represents a duration of time (1s, 1m, 1h, 1d).
+Retention policies are set using durations.
+Data older than the duration is automatically dropped from the database.
+
+## E
+
+### event
+
+Metrics gathered at irregular time intervals.
+
+### explicit block
+
+In Flux, an explicit block is a possibly empty sequence of statements within matching braces (`{ }`) that is defined in the source code, for example:
+
+```
+Block = "{" StatementList "}"
+StatementList = { Statement }
+```
+
+Related entries: [implicit block](#implicit-block), [block](#block)
+
+### expression
+
+A combination of one or more constants, variables, operators, and functions.
+
+## F
+
+### field
+
+The key-value pair in InfluxDB's data structure that records metadata and the actual data value.
+Fields are required in InfluxDB's data structure and they are not indexed - queries on field values scan all points that match the specified time range and, as a result, are not performant relative to tags.
+
+*Query tip:* Compare fields to tags; tags are indexed.
+
+Related entries: [field key](#field-key), [field set](#field-set), [field value](#field-value), [tag](#tag)
+
+### field key
+
+The key of the key-value pair.
+Field keys are strings and they store metadata.
+
+Related entries: [field](#field), [field set](#field-set), [field value](#field-value), [tag key](#tag-key)
+
+### field set
+
+The collection of field keys and field values on a point.
+
+Related entries: [field](#field), [field key](#field-key), [field value](#field-value), [point](#point)
+
+### field value
+
+The value of a key-value pair.
+Field values are the actual data; they can be strings, floats, integers, or booleans.
+A field value is always associated with a timestamp.
+
+Field values are not indexed - queries on field values scan all points that match the specified time range and, as a result, are not performant.
+
+*Query tip:* Compare field values to tag values; tag values are indexed.
+ +Related entries: [field](#field), [field key](#field-key), [field set](#field-set), [tag value](#tag-value), [timestamp](#timestamp) + +### file block + +A file block is a fixed-length chunk of data read into memory when requested by an application. + +Related entries: [block](#block) + +### float + +A real number written with a decimal point dividing the integer and fractional parts (`1.0`, `3.14`, `-20.1`). +InfluxDB supports 64-bit float values. +In [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/), columns that contain +float values are annotated with the `double` datatype. + +### flush interval + +The global interval for flushing data from each Telegraf output plugin to its destination. +This value should not be set lower than the collection interval. + +Related entries: [collection interval](#collection-interval), [flush jitter](#flush-jitter), [output plugin](#output-plugin) + +### flush jitter + +Flush jitter prevents every Telegraf output plugin from sending writes simultaneously, which can overwhelm some data sinks. +Each flush interval, every Telegraf output plugin will sleep for a random time between zero and the flush jitter before emitting metrics. +Flush jitter smooths out write spikes when running a large number of Telegraf instances. + +Related entries: [flush interval](#flush-interval), [output plugin](#output-plugin) + +### Flux + +A lightweight scripting language for querying databases (like InfluxDB) and working with data. + +### function + +Flux functions aggregate, select, and transform time series data. +For a complete list of Flux functions, see [Flux functions](/{{< latest "flux" >}}/stdlib/all-functions/). + +Related entries: [aggregate](#aggregate), [selector](#selector), [transformation](#transformation) + +### function block + +In Flux, each file has a file block containing all Flux source text in that file. +Each function literal has its own function block even if not explicitly declared. + +## G + +### gauge + + A type of visualization that displays the single most recent value for a time series. +A gauge typically displays one or more measures from a single row, and is not designed to display multiple rows of data. +Elements include a range, major and minor tick marks (within the range), and a pointer (needle) indicating the single most recent value. + +### graph + +A diagram that visually depicts the relation between variable quantities measured along specified axes. + +### group key +Group keys determine the schema and contents of tables in Flux output. +A group key is a list of columns for which every row in the table has the same value. +Columns with unique values in each row are not part of the group key. + +### gzip + +gzip is a type of data compression that compress chunks of data, which is restored by unzipping compressed gzip files. +The gzip file extension is `.gz`. + +## H + + + +### histogram + +A visual representation of statistical information that uses rectangles to show the frequency of data items in successive, equal intervals or bins. + +## I + +### identifier + +Identifiers are tokens that refer to task names, bucket names, field keys, +measurement names, tag keys, and user names. +For examples and rules, see [Flux language lexical elements](/{{< latest "flux" >}}/spec/lexical-elements/#identifiers). 
+
+Related entries: [bucket](#bucket), [field key](#field-key), [measurement](#measurement), [tag key](#tag-key), [user](#user)
+
+### implicit block
+
+In Flux, an implicit block is a possibly empty sequence of statements within matching braces (`{ }`) that includes the following types:
+
+ - Universe: Encompasses all Flux source text.
+ - Package: Each package includes a package block that contains Flux source text for the package.
+ - File: Each file has a file block containing Flux source text in the file.
+ - Function: Each function literal has a function block with Flux source text (even if not explicitly declared).
+
+Related entries: [explicit block](#explicit-block), [block](#block)
+
+### influx
+
+`influx` is a command line interface (CLI) that interacts with the InfluxDB daemon (`influxd`).
+
+### influxd
+
+`influxd` is the InfluxDB daemon that runs the InfluxDB server and other required processes.
+
+### InfluxDB
+
+An open-source time series database (TSDB) developed by InfluxData.
+Written in Go and optimized for fast, high-availability storage and retrieval of time series data in fields such as operations monitoring, application metrics, Internet of Things sensor data, and real-time analytics.
+
+### InfluxDB UI
+
+The graphical web interface provided by InfluxDB for visualizing data and managing InfluxDB functionality.
+
+### InfluxQL
+
+The SQL-like query language used to query data in InfluxDB 1.x.
+The preferred method for querying data in InfluxDB {{< current-version >}} is the [Flux](#flux) language.
+
+### input plugin
+
+Telegraf input plugins actively gather metrics and deliver them to the core agent, where aggregator, processor, and output plugins can operate on the metrics.
+To activate an input plugin, enable and configure the plugin in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](#aggregator-plugin), [collection interval](#collection-interval), [output plugin](#output-plugin), [processor plugin](#processor-plugin)
+
+### instance
+
+An entity comprising data on a server (or virtual server in cloud computing).
+
+{{% oss-only %}}
+
+### instance owner
+
+A type of admin role for a user.
+Instance owners have read/write permissions for all resources within the instance.
+
+{{% /oss-only %}}
+
+### integer
+
+A whole number that is positive, negative, or zero (`0`, `-5`, `143`).
+InfluxDB supports 64-bit integers (minimum: `-9223372036854775808`, maximum: `9223372036854775807`).
+In [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/), columns that contain
+integers are annotated with the `long` datatype.
+
+Related entries: [unsigned integer](#unsigned-integer)
+
+## J
+
+### JWT
+
+Typically, JSON web tokens (JWT) are used to authenticate users between an identity provider and a service provider.
+A server can generate a JWT to assert claims about a user or process.
+For example, an "admin" token sent to a client can prove the client is logged in as admin.
+Tokens are signed by one party's private key (typically, the server).
+Private keys are used by both parties to verify that a token is legitimate.
+
+### Jaeger
+
+Open source tracing used in distributed systems to monitor and troubleshoot transactions.
+
+### JSON
+
+JavaScript Object Notation (JSON) is an open-standard file format that uses human-readable text to transmit data objects consisting of attribute–value pairs and array data types.
+
+## K
+
+### keyword
+
+A keyword is reserved by a program because it has special meaning.
+Every programming language has a set of keywords (reserved names) that cannot be used as identifiers.
+
+See a list of [Flux keywords](/{{< latest "flux" >}}/spec/lexical-elements/#keywords).
+
+## L
+
+### literal
+
+A literal is a value in an expression, such as a number, character, string, function, record, or array.
+Literal values are interpreted as defined.
+
+See examples of [Flux literals](/{{< latest "flux" >}}/spec/expressions/#examples-of-function-literals).
+
+### logs
+
+Logs record information.
+Event logs describe system events and activity that help to describe and diagnose problems.
+Transaction logs describe changes to stored data that help recover data if a database crashes or other errors occur.
+
+The InfluxDB {{< current-version >}} user interface (UI) can be used to view log history and data.
+
+### Line protocol (LP)
+
+The text-based format for writing points to InfluxDB.
+See [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/).
+
+## M
+
+### measurement
+
+The part of InfluxDB's structure that describes the data stored in the associated fields.
+Measurements are strings.
+
+Related entries: [field](#field), [series](#series)
+
+### member
+
+A user in an organization.
+
+### metric
+
+Data tracked over time.
+
+### metric buffer
+
+The metric buffer caches individual metrics when writes are failing for a Telegraf output plugin.
+Telegraf will attempt to flush the buffer upon a successful write to the output.
+The oldest metrics are dropped first when this buffer fills.
+
+Related entries: [output plugin](#output-plugin)
+
+### missing values
+
+Denoted by a null value.
+Identifies missing information, which may be useful to include in an error message.
+
+The Flux data model includes [Missing values (null)](/{{< latest "flux" >}}/spec/data-model/#missing-values-null).
+
+## N
+
+### node
+
+An independent `influxd` process.
+
+Related entries: [server](#server)
+
+### notification endpoint
+
+The notification endpoint specifies the Slack or PagerDuty endpoint to send a notification and contains configuration details for connecting to the endpoint.
+Learn how to [create a notification endpoint](/influxdb/v2.5/monitor-alert/notification-endpoints/create).
+
+Related entries: [check](#check), [notification rule](#notification-rule)
+
+### notification rule
+
+A notification rule specifies a status level (and tags) to alert on, the notification message to send for the specified status level (or change in status level), and the interval or schedule at which you want to check the status level (and tags).
+If conditions are met, the notification rule sends a message to the [notification endpoint](#notification-endpoint) and stores a receipt in a notification measurement in the `_monitoring` bucket.
+For example, a notification rule may specify a message to send to a Slack endpoint when a status level is critical (`crit`).
+
+Learn how to [create a notification rule](/influxdb/v2.5/monitor-alert/notification-rules/create).
+
+Related entries: [check](#check), [notification endpoint](#notification-endpoint)
+
+### now()
+
+The local server's nanosecond timestamp.
+
+### null
+
+A data type that represents a missing or unknown value.
+Denoted by the null value.
+
+## O
+
+### operator
+
+A symbol that usually represents an action or process.
+For example: `+`, `-`, `>`.
+
+### operand
+
+The object or value on either side of an operator.
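+
+For example, in the hypothetical Flux predicate below, `r._value` and `90.0` are the
+operands and `>=` is the operator:
+
+```js
+// `r._value` (left operand) is compared to `90.0` (right operand)
+// using the `>=` comparison operator.
+(r) => r._value >= 90.0
+```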
+ +### option + +Represents a storage location for any value of a specified type. +Mutable, can hold different values during its lifetime. + +See built-in Flux [options](/{{< latest "flux" >}}/spec/options/). + +### option assignment + +An option assignment binds an identifier to an option. + +Learn about the [option assignment](/{{< latest "flux" >}}/spec/assignment-scope/#option-assignment) in Flux. + +### organization + +A workspace for a group of users. +All dashboards, tasks, buckets, members, and so on, belong to an organization. + + +### owner + +A type of role for a user. +Owners have read/write permissions. +Users can have owner roles for bucket and organization resources. + +Role permissions are separate from API token permissions. For additional +information on API tokens, see [token](#tokens). + +### output plugin + +Telegraf output plugins deliver metrics to their configured destination. +To activate an output plugin, enable and configure the plugin in Telegraf's configuration file. + +Related entries: [aggregator plugin](#aggregator-plugin), [flush interval](#flush-interval), [input plugin](#input-plugin), [processor plugin](#processor-plugin) + +## P + +### parameter + +A key-value pair used to pass information to functions. + +### pipe + +Method for passing information from one process to another. +For example, an output parameter from one process is input to another process. +Information passed through a pipe is retained until the receiving process reads the information. + +### pipe-forward operator + +An operator (`|>`) used in Flux to chain operations together. +Specifies the output from a function is input to next function. + +### point + +In InfluxDB, a point represents a single data record, similar to a row in a SQL database table. +Each point: + +- has a measurement, a tag set, a field key, a field value, and a timestamp; +- is uniquely identified by its series and timestamp. + +In a series, each point has a unique timestamp. +If you write a point to a series with a timestamp that matches an existing point, the field set becomes a union of the old and new field set, where any ties go to the new field set. + +Related entries: [measurement](#measurement), [tag set](#tag-set), [field set](#field-set), [timestamp](#timestamp) + +### precision + +The precision configuration setting determines the timestamp precision retained for input data points. +All incoming timestamps are truncated to the specified precision. +Valid precisions are `ns`, `us` or `µs`, `ms`, and `s`. + +In Telegraf, truncated timestamps are padded with zeros to create a nanosecond timestamp. +Telegraf output plugins emit timestamps in nanoseconds. +For example, if the precision is set to `ms`, the nanosecond epoch timestamp `1480000000123456789` is truncated to `1480000000123` in millisecond precision and padded with zeroes to make a new, less precise nanosecond timestamp of `1480000000123000000`. +Telegraf output plugins do not alter the timestamp further. +The precision setting is ignored for service input plugins. + +Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin), [service input plugin](#service-input-plugin) + +### predicate expression + +A predicate expression compares two values and returns `true` or `false` based on +the relationship between the two values. +A predicate expression is comprised of a left operand, a comparison operator, and a right operand. 
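+
+###### Example predicate expressions
+
+A few minimal sketches; the values and column names below are hypothetical:
+
+```js
+"abc" == "abc"                      // evaluates to true
+41.2 > 50.0                         // evaluates to false
+(r) => r._measurement != "cpu"      // a predicate expression used in a predicate function
+```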
+ +### predicate function +A Flux predicate function is an anonymous function that returns `true` or `false` +based on one or more [predicate expressions](#predicate-expression). + +###### Example predicate function +```js +(r) => r.foo == "bar" and r.baz != "quz" +``` + +### process + +A set of predetermined rules. +A process can refer to instructions being executed by the computer processor or refer to the act of manipulating data. + +In Flux, you can process data with [InfluxDB tasks](/influxdb/v2.5/process-data/get-started/). + +### processor plugin + +Telegraf processor plugins transform, decorate, and filter metrics collected by input plugins, passing the transformed metrics to the output plugins. + +Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin) + +### Prometheus format + +A simple text-based format for exposing metrics and ingesting them into Prometheus or InfluxDB using InfluxDB scrapers. + +Collect data from any accessible endpoint that provides data in the [Prometheus exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/). + +## Q + +### query + +A Flux script that returns time series data, including [tags](#tag) and [timestamps](#timestamp). + +See [Query data in InfluxDB](/influxdb/v2.5/query-data/). + +## R + +### REPL + +A Read-Eval-Print Loop (REPL) is an interactive programming environment where you type a command and immediately see the result. +See [Flux REPL](/influxdb/v2.5/tools/flux-repl/) for information on building and using the REPL. + +### record + +A tuple of named values represented using a record type. + +### regular expressions + +Regular expressions (regex or regexp) are patterns used to match character combinations in strings. + +### rejected points + +In a batch of data, points that InfluxDB couldn't write to a bucket. +Field type conflicts are a common cause of rejected points. + +### retention period +The [duration](#duration) of time that a bucket retains data. +InfluxDB drops points with timestamps older than their bucket's retention period. +The minimum retention period is **one hour**. + +Related entries: [bucket](#bucket), [shard group duration](#shard-group-duration) + + + +### retention policy (RP) +Retention policy is an InfluxDB 1.x concept that represents the duration of time +that each data point in the retention policy persists. +The InfluxDB 2.x equivalent is [retention period](#retention-period). +For more information about retention policies, see the +[latest 1.x documentation](/{{< latest "influxdb" "v1" >}}/concepts/glossary/#retention-policy-rp). + +Related entries: [retention period](#retention-period), + +### RFC3339 timestamp +A timestamp that uses the human-readable DateTime format proposed in +[RFC 3339](https://tools.ietf.org/html/rfc3339) (for example: `2020-01-01T00:00:00.00Z`). +Flux and InfluxDB clients return query results with RFC3339 timestamps. + +Related entries: [RFC3339Nano timestamp](#rfc3339nano-timestamp), [timestamp](#timestamp), [unix timestamp](#unix-timestamp) + +### RFC3339Nano timestamp +A [Golang representation of the RFC 3339 DateTime format](https://go.dev/src/time/format.go) that uses nanosecond resolution--for example: +`2006-01-02T15:04:05.999999999Z07:00`. + +InfluxDB clients can return RFC3339Nano timestamps in log events and CSV-formatted query results. 
+ +Related entries: [RFC3339 timestamp](#rfc3339-timestamp), [timestamp](#timestamp), [unix timestamp](#unix-timestamp) + +## S + +### schema + +How data is organized in InfluxDB. +The fundamentals of the InfluxDB schema are buckets (which include retention policies), series, measurements, tag keys, tag values, and field keys. + +Related entries: [bucket](#bucket), [field key](#field-key), [measurement](#measurement), [series](#series), [tag key](#tag-key), [tag value](#tag-value) + +### scrape + +InfluxDB scrapes data from specified targets at regular intervals and writes the data to an InfluxDB bucket. +Data can be scraped from any accessible endpoint that provides data in the [Prometheus exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/). + +### secret +Secrets are key-value pairs that contain information you want to control access to, such as API keys, passwords, or certificates. + +### selector + +A Flux function that returns a single point from the range of specified points. +See [Flux selector functions](/{{< latest "flux" >}}/stdlib/universe/) for a complete list of available selector functions. + +Related entries: [aggregate](#aggregate), [function](#function), [transformation](#transformation) + +### series + +A collection of data in the InfluxDB data structure that share a common +{{% cloud-only %}}**measurement**, **tag set**, and **field key**.{{% /cloud-only %}} +{{% oss-only %}}**measurement** and **tag set**.{{% /oss-only %}} + +Related entries: [field set](#field-set), [measurement](#measurement), [tag set](#tag-set) + +### series cardinality + +The number of unique measurement, tag set, and field key combinations in an InfluxDB bucket. + +For example, assume that an InfluxDB bucket has one measurement. +The single measurement has two tag keys: `email` and `status`. +If there are three different `email`s, and each email address is associated with two +different `status`es, the series cardinality for the measurement is 6 +(3 × 2 = 6): + +| email | status | +| :-------------------- | :----- | +| lorr@influxdata.com | start | +| lorr@influxdata.com | finish | +| marv@influxdata.com | start | +| marv@influxdata.com | finish | +| cliff@influxdata.com | start | +| cliff@influxdata.com | finish | + +In some cases, performing this multiplication may overestimate series cardinality +because of the presence of dependent tags. +Dependent tags are scoped by another tag and do not increase series cardinality. +If we add the tag `firstname` to the example above, the series cardinality +would not be 18 (3 × 2 × 3 = 18). +The series cardinality would remain unchanged at 6, as `firstname` is already scoped by the `email` tag: + +| email | status | firstname | +| :------------------- | :----- | :-------- | +| lorr@influxdata.com | start | lorraine | +| lorr@influxdata.com | finish | lorraine | +| marv@influxdata.com | start | marvin | +| marv@influxdata.com | finish | marvin | +| cliff@influxdata.com | start | clifford | +| cliff@influxdata.com | finish | clifford | + +##### Query for cardinality: +- **Flux:** [influxdb.cardinality()](/{{< latest "flux" >}}/stdlib/influxdb/cardinality/) +- **InfluxQL:** [SHOW CARDINALITY](/{{< latest "influxdb" "v1" >}}/query_language/spec/#show-cardinality) + +Related entries: [field key](#field-key),[measurement](#measurement), [tag key](#tag-key), [tag set](#tag-set) + +### series file + +A file created and used by the **InfluxDB OSS storage engine** +that contains a set of all series keys across the entire database. 
+ +### series key + +A series key identifies a particular series by measurement, tag set, and field key. + +For example: + +``` +# measurement, tag set, field key +h2o_level, location=santa_monica, h2o_feet +``` + +Related entries: [series](#series) + +### server + +A computer, virtual or physical, running InfluxDB. + + +Related entries: [node](#node) + +### service input plugin + +Telegraf input plugins that run in a passive collection mode while the Telegraf agent is running. +Service input plugins listen on a socket for known protocol inputs, or apply their own logic to ingested metrics before delivering metrics to the Telegraf agent. + +Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin) + +### shard + +A shard contains encoded and compressed data for a specific set of [series](#series). +A shard consists of one or more [TSM files](#tsm-time-structured-merge-tree) on disk. +All points in a series in a given shard group are stored in the same shard (TSM file) on disk. +A shard belongs to a single [shard group](#shard-group). + +For more information, see [Shards and shard groups (OSS)](/influxdb/%762.1/reference/internals/shards/). + +Related entries: [series](#series), [shard duration](#shard-duration), +[shard group](#shard-group), [tsm](#tsm-time-structured-merge-tree) + +### shard group + +Shard groups are logical containers for shards organized by [bucket](#bucket). +Every bucket with data has at least one shard group. +A shard group contains all shards with data for the time interval covered by the shard group. +The interval spanned by each shard group is the [shard group duration](#shard-group-duration). + +For more information, see [Shards and shard groups (OSS)](/influxdb/%762.1/reference/internals/shards/). + +Related entries: [bucket](#bucket), [retention period](#retention-period), +[series](#series), [shard](#shard), [shard duration](#shard-duration) + +### shard group duration + +The duration of time or interval that each [shard group](#shard-group) covers. Set the `shard-group-duration` for each [bucket](#bucket). + +For more information, see: + +- [Shards and shard groups (OSS)](/influxdb/%762.1/reference/internals/shards/) +- [Manage buckets](/influxdb/v2.5/organizations/buckets/) + + + +### Single Stat + +A visualization that displays the numeric value of the most recent point in a table (or series) returned by a query. + +### Snappy compression + +InfluxDB uses snappy compression to compress batches of points. +To improve space and disk IO efficiency, each batch is compressed before being written to disk. + + + +### step-plot + +A data visualization that displays time series data in a staircase graph. +Generate a step-plot using the step [interpolation option for line graphs](/influxdb/v2.5/visualize-data/visualization-types/graph/#options). + +### stream + +Flux processes streams of data. +A stream includes a series of tables over a sequence of time intervals. + +### string + +A data type used to represent text. +In [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/), columns that contain +string values are annotated with the `string` datatype. + +## T + +### TCP + +InfluxDB uses Transmission Control Protocol (TCP) port 8086 for client-server communication over the InfluxDB HTTP API. + + + +### table + +Flux processes a series of tables for a specified time series. +These tables in sequence result in a stream of data. 
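+
+As a loose illustration (the bucket name, measurement, and tag below are hypothetical),
+the output of the following Flux query is a stream of tables, one table per unique
+value of the `host` tag:
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "cpu")
+    // Group so that each output table contains the rows for a single host;
+    // the stream is the sequence of these tables.
+    |> group(columns: ["host"])
+```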
+ +### tag + +The key-value pair in InfluxDB's data structure that records metadata. +Tags are an optional part of InfluxDB's data structure but they are useful for storing commonly-queried metadata; tags are indexed so queries on tags are performant. +*Query tip:* Compare tags to fields; fields are not indexed. + +Related entries: [field](#field), [tag key](#tag-key), [tag set](#tag-set), [tag value](#tag-value) + +### tag key + +The key of a tag key-value pair. +Tag keys are strings and store metadata. +Tag keys are indexed so queries on tag keys are processed quickly. + +*Query tip:* Compare tag keys to field keys. +Field keys are not indexed. + +Related entries: [field key](#field-key), [tag](#tag), [tag set](#tag-set), [tag value](#tag-value) + +### tag set + +The collection of tag keys and tag values on a point. + +Related entries: [point](#point), [series](#series), [tag](#tag), [tag key](#tag-key), [tag value](#tag-value) + +### tag value + +The value of a tag key-value pair. +Tag values are strings and they store metadata. +Tag values are indexed so queries on tag values are processed quickly. + +Related entries: [tag](#tag), [tag key](#tag-key), [tag set](#tag-set) + +### task + +A scheduled Flux query that runs periodically and may store results in a specified measurement. +Examples include downsampling and batch jobs. +For more information, see [Process Data with InfluxDB tasks](/influxdb/v2.5/process-data/). + +Related entries: [function](#function) + +### technical preview + +A new feature released to gather feedback from customers and the InfluxDB community. Send feedback to InfluxData via [Community Slack](https://app.slack.com/client/TH8RGQX5Z) or our [Community Site](https://community.influxdata.com/). + +### Telegraf + +A plugin-driven agent that collects, processes, aggregates, and writes metrics. + +Related entries: [Automatically configure Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/), [Manually configure Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/manual-config/), [Telegraf plugins](/{{< latest "telegraf" >}}/plugins//), [Use Telegraf to collect data](/influxdb/v2.5/write-data/no-code/use-telegraf/), [View a Telegraf configuration](/influxdb/v2.5/telegraf-configs/view/) + +### time (data type) + +A data type that represents a single point in time with nanosecond precision. + +### time series data + +Sequence of data points typically consisting of successive measurements made from the same source over a time interval. +Time series data shows how data evolves over time. +On a time series data graph, one of the axes is always time. +Time series data may be regular or irregular. +Regular time series data changes in constant intervals. +Irregular time series data changes at non-constant intervals. + +### timestamp + +The date and time associated with a point. +Time in InfluxDB is in UTC. + +To specify time when writing data, see [Elements of line protocol](/influxdb/v2.5/reference/syntax/line-protocol/#elements-of-line-protocol). +To specify time when querying data, see [Query InfluxDB with Flux](/influxdb/v2.5/query-data/get-started/query-influxdb/#2-specify-a-time-range). + +Related entries: [point](#point), [unix timestamp](#unix-timestamp), [RFC3339 timestamp](#rfc3339-timestamp) + +### token + +Tokens (or API tokens) verify user and organization permissions in InfluxDB. 
+There are different types of API tokens: + +{{% oss-only %}} + +- **Operator token:** grants full read and write access to all resources in **all organizations in InfluxDB OSS 2.x**. _InfluxDB Cloud does not support Operator tokens._ +- **All-Access token:** grants full read and write access to all resources in an organization. +- **Read/Write token:** grants read or write access to specific resources in an organization. + +{{% /oss-only %}} +{{% cloud-only %}} + +- **All-Access token:** grants full read and write access to all resources in an organization. +- **Read/Write token:** grants read or write access to specific resources in an organization. + +{{% /cloud-only %}} + +Related entries: [Create a token](/influxdb/v2.5/security/tokens/create-token/). + +### tracing + +By default, tracing is disabled in InfluxDB OSS. +To enable tracing or set other InfluxDB OSS configuration options, +see [InfluxDB OSS configuration options](/influxdb/v2%2E0/reference/config-options/). + +### transformation + +An InfluxQL function that returns a value or a set of values calculated from specified points, but does not return an aggregated value across those points. +See [InfluxQL functions](/{{< latest "influxdb" "v1" >}}/query_language/functions/#transformations) for a complete list of the available and upcoming aggregations. + +Related entries: [aggregate](#aggregate), [function](#function), [selector](#selector) + +### TSI (Time Series Index) + +TSI uses the operating system's page cache to pull frequently accessed data into memory and keep infrequently accessed data on disk. + +### TSL + +The Time Series Logs (TSL) extension (`.tsl`) identifies Time Series Index (TSI) log files, generated by the tsi1 engine. + +### TSM (Time Structured Merge tree) + +A data storage format that allows greater compaction and higher write and read throughput than B+ or LSM tree implementations. +For more information, see [Storage engine](/{{< latest "influxdb" "v1" >}}/concepts/storage_engine/). + +Related entries: [TSI](#tsi-time-series-index) + +## U + +### UDP + +User Datagram Protocol is a packet of information. +When a request is made, a UDP packet is sent to the recipient. +The sender doesn't verify the packet is received. +The sender continues to send the next packets. +This means computers can communicate more quickly. +This protocol is used when speed is desirable and error correction is not necessary. + +### universe block + +An implicit block that encompasses all Flux source text in a universe block. + +### unix timestamp + +Counts time since **Unix Epoch (1970-01-01T00:00:00Z UTC)** in specified units ([precision](#precision)). +Specify timestamp precision when [writing data to InfluxDB](/influxdb/v2.5/write-data/). +InfluxDB supports the following unix timestamp precisions: + +| Precision | Description | Example | +|:--------- |:----------- |:------- | +| `ns` | Nanoseconds | `1577836800000000000` | +| `us` | Microseconds | `1577836800000000` | +| `ms` | Milliseconds | `1577836800000` | +| `s` | Seconds | `1577836800` | + +
+
+The examples above represent 2020-01-01T00:00:00Z UTC.
+
+Related entries: [timestamp](#timestamp), [RFC3339 timestamp](#rfc3339-timestamp)
+
+### unsigned integer
+
+A whole number that is positive or zero (`0`, `143`). Also known as a "uinteger."
+InfluxDB supports 64-bit unsigned integers (minimum: `0`, maximum: `18446744073709551615`).
+In [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/), columns that contain
+unsigned integers are annotated with the `unsignedLong` datatype.
+
+Related entries: [integer](#integer)
+
+### user
+
+InfluxDB users are granted permission to access InfluxDB.
+Users are added as a member of an organization and are given a unique API token.
+
+## V
+
+### values per second
+
+The preferred measurement of the rate at which data are persisted to InfluxDB.
+Write speeds are generally quoted in values per second.
+
+To calculate the values per second rate, multiply the number of points written
+per second by the number of values stored per point.
+For example, if the points have four fields each, and a batch of 5000 points is
+written 10 times per second, the values per second rate is:
+
+**4 field values per point** × **5000 points per batch** × **10 batches per second** = **200,000 values per second**
+
+Related entries: [batch](#batch), [field](#field), [point](#point)
+
+### variable
+
+A storage location (identified by a memory address) paired with an associated symbolic name (an identifier).
+A variable contains some known or unknown quantity of information referred to as a value.
+
+### variable assignment
+
+A statement that sets or updates the value stored in a variable.
+
+In Flux, a variable assignment creates a variable bound to an identifier and gives it a type and value.
+A variable keeps the same type and value for the remainder of its lifetime.
+An identifier assigned to a variable in a block cannot be reassigned in the same block.
+
+## W
+
+### windowing
+
+Grouping data based on specified time intervals.
+For information about how to window in Flux, see [Window and aggregate data with Flux](/influxdb/v2.5/query-data/flux/window-aggregate/).
diff --git a/content/influxdb/v2.5/reference/internals/_index.md b/content/influxdb/v2.5/reference/internals/_index.md
new file mode 100644
index 000000000..08f68f835
--- /dev/null
+++ b/content/influxdb/v2.5/reference/internals/_index.md
@@ -0,0 +1,9 @@
+---
+title: InfluxDB internals
+menu:
+  influxdb_2_5_ref:
+    name: InfluxDB internals
+weight: 7
+---
+
+{{< children >}}
diff --git a/content/influxdb/v2.5/reference/internals/data-retention.md b/content/influxdb/v2.5/reference/internals/data-retention.md
new file mode 100644
index 000000000..994941986
--- /dev/null
+++ b/content/influxdb/v2.5/reference/internals/data-retention.md
@@ -0,0 +1,81 @@
+---
+title: Data retention in InfluxDB
+description: >
+  The InfluxDB retention service checks for and removes data with timestamps beyond
+  the defined retention period of the bucket the data is stored in.
+weight: 103
+menu:
+  influxdb_2_5_ref:
+    name: Data retention
+    parent: InfluxDB internals
+influxdb/v2.5/tags: [storage, internals]
+related:
+  - /influxdb/v2.5/reference/internals/shards/
+  - /influxdb/v2.5/reference/internals/storage-engine/
+  - /influxdb/v2.5/admin/internals/
+---
+
+The **InfluxDB retention enforcement service** checks for and removes data with
+timestamps beyond the defined retention period of the
+[bucket](/influxdb/v2.5/reference/glossary/#bucket) the data is stored in.
+This service is designed to automatically delete "expired" data and optimize disk +usage without any user intervention. + +By default, the retention enforcement service runs every 30 minutes. +You can +configure this interval with the +[`storage-retention-check-interval`](/influxdb/v2.5/reference/config-options/#storage-retention-check-interval) +configuration option. + +- [Bucket retention period](#bucket-retention-period) +- [Shard group duration](#shard-group-duration) +- [When does data actually get deleted?](#when-does-data-actually-get-deleted) + +## Bucket retention period +A **bucket retention period** is the duration of time that a bucket retains data. +Retention periods can be as short as an hour or infinite. +[Points](/influxdb/v2.5/reference/glossary/#point) in a bucket with timestamps +beyond the defined retention period (relative to now) are _eligible_ for deletion. + +## Shard group duration +InfluxDB stores data on disk in [shards](/influxdb/v2.5/reference/glossary/#shard). +Each shard belongs to a shard group and each shard group has a shard group duration. +The **shard group duration** defines the duration of time that each +shard in the shard group covers. +Each shard contains only points with timestamps in a specific time range defined +by the shard group duration. + +By default, shard group durations are set automatically based on the bucket retention +period, but can also be explicitly defined when creating or updating a bucket. + +_For more information, see [InfluxDB shard group duration](/influxdb/v2.5/reference/internals/shards/#shard-group-duration)._ + +{{% note %}} +#### View bucket retention periods and shard group durations +Use the [`influx bucket list` command](/influxdb/v2.5/reference/cli/influx/bucket/list/) +to view the retention period and shard group duration of buckets in your organization. +{{% /note %}} + +## When does data actually get deleted? +The InfluxDB retention enforcement service runs at regular intervals and deletes +[shard groups](/influxdb/v2.5/reference/internals/shards/#shard-groups), not individual points. +The service will only delete a shard group when the entire time range covered by +the shard group is beyond the bucket retention period. + +{{% note %}} +#### Data is queryable until deleted +Even though data may be older than the specified bucket retention period, +it is queryable until removed by the retention enforcement service. +{{% /note %}} + +To calculate the possible time data will persist before being deleted: + +- **minimum**: `bucket-retention-period` +- **maximum** `bucket-retention-period + shard-group-duration` + +For example, if your bucket retention period is three days (`3d`) and your +shard group duration is one day (`1d`), the retention enforcement service +deletes all shard groups with data that is **three to four days old** the next +time the service runs. + +{{< html-diagram/data-retention >}} diff --git a/content/influxdb/v2.5/reference/internals/file-system-layout.md b/content/influxdb/v2.5/reference/internals/file-system-layout.md new file mode 100644 index 000000000..3161960a4 --- /dev/null +++ b/content/influxdb/v2.5/reference/internals/file-system-layout.md @@ -0,0 +1,302 @@ +--- +title: InfluxDB file system layout +description: > + The InfluxDB file system layout depends on the operating system, package manager, + or containerization platform used to install InfluxDB. 
+weight: 102
+menu:
+  influxdb_2_5_ref:
+    name: File system layout
+    parent: InfluxDB internals
+influxdb/v2.5/tags: [storage, internals]
+related:
+  - /influxdb/v2.5/admin/internals/
+---
+
+The InfluxDB file system layout depends on the operating system, installation method,
+or containerization platform used to install InfluxDB.
+
+- [InfluxDB file structure](#influxdb-file-structure)
+- [File system layout](#file-system-layout)
+
+## InfluxDB file structure
+The InfluxDB file structure includes the following:
+
+#### Engine path
+Directory path to the [storage engine](/{{< latest "influxdb" >}}/reference/internals/storage-engine/),
+where InfluxDB stores time series data. The engine path includes the following directories:
+
+- **data**: Stores time-structured merge tree (TSM) files.
+  For more information about the structure of the `data` directory, see
+  [TSM directories and files layout](#tsm-directories-and-files-layout).
+- **replicationq**: Stores the replication queue for the [InfluxDB replication service](/influxdb/v2.5/write-data/replication/).
+- **wal**: Stores write-ahead log (WAL) files.
+  For more information about the structure of the `wal` directory, see
+  [WAL directories and files layout](#wal-directories-and-files-layout).
+
+To customize this path, use the [engine-path](/influxdb/v2.5/reference/config-options/#engine-path)
+configuration option.
+
+#### Bolt path
+File path to the [Boltdb](https://github.com/boltdb/bolt) database, a file-based
+key-value store for non-time series data, such as InfluxDB users, dashboards, and tasks.
+To customize this path, use the [bolt-path](/influxdb/v2.5/reference/config-options/#bolt-path)
+configuration option.
+
+#### Configs path
+File path to [`influx` CLI connection configurations](/influxdb/v2.5/reference/cli/influx/config/) (configs).
+To customize this path, use the `--configs-path` flag with `influx` CLI commands.
+
+#### InfluxDB configuration files
+Some operating systems and package managers store a default InfluxDB (`influxd`) configuration file on disk.
+For more information about using InfluxDB configuration files, see
+[Configuration options](/influxdb/v2.5/reference/config-options/).
+
+## File system layout
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[macOS](#)
+[Linux](#)
+[Windows](#)
+[Docker](#)
+[Kubernetes](#)
+{{% /tabs %}}
+
+{{% tab-content %}}
+
+#### macOS default paths
+| Path | Default |
+|:------------------------------|:-------------------------------|
+| [Engine path](#engine-path) | `~/.influxdbv2/engine/` |
+| [Bolt path](#bolt-path) | `~/.influxdbv2/influxd.bolt` |
+| [SQLite path](#sqlite-path) | `~/.influxdbv2/influxd.sqlite` |
+| [Configs path](#configs-path) | `~/.influxdbv2/configs` |
+
+#### macOS file system overview
+{{% filesystem-diagram %}}
+- ~/.influxdbv2/
+  - engine/
+    - data/
+      - _TSM directories and files_
+    - wal/
+      - _WAL directories and files_
+  - configs
+  - influxd.bolt
+  - influxd.sqlite
+{{% /filesystem-diagram %}}
+{{% /tab-content %}}
+
+
+{{% tab-content %}}
+When installing InfluxDB on Linux, you can download and install the `influxd` binary,
+or you can use a package manager.
+Which installation method you use determines the file system layout.
+ +- [Installed as a standalone binary](#installed-as-a-standalone-binary) +- [Installed as a package](#installed-as-a-package) + +### Installed as a standalone binary + +#### Linux default paths (standalone binary) +| Path | Default | +|:---- |:------- | +| [Engine path](#engine-path) | `~/.influxdbv2/engine/` | +| [Bolt path](#bolt-path) | `~/.influxdbv2/influxd.bolt` | +| [SQLite path](#sqlite-path) | `~/.influxdbv2/influxd.sqlite` | +| [Configs path](#configs-path) | `~/.influxdbv2/configs` | + +#### Linux file system overview (standalone binary) +{{% filesystem-diagram %}} +- ~/.influxdbv2/ + - engine/ + - data/ + - _TSM directories and files_ + - wal/ + - _WAL directories and files_ + - configs + - influxd.bolt + - influxd.sqlite +{{% /filesystem-diagram %}} + +### Installed as a package +InfluxDB {{< current-version >}} supports **.deb-** and **.rpm-based** Linux package managers. +The file system layout is the same with each. + +#### Linux default paths (package) +| Path | Default | +|:----------------------------------------------------------|:-----------------------------------| +| [Engine path](#engine-path) | `/var/lib/influxdb/engine/` | +| [Bolt path](#bolt-path) | `/var/lib/influxdb/influxd.bolt` | +| [SQLite path](#sqlite-path) | `/var/lib/influxdb/influxd.sqlite` | +| [Configs path](#configs-path) | `/var/lib/influxdb/configs` | +| [Default config file path](#influxdb-configuration-files) | `/etc/influxdb/config.toml` | + +#### Linux file system overview (package) +{{% filesystem-diagram %}} +- /var/lib/influxdb/ + - engine/ + - data/ + - _TSM directories and files_ + - wal/ + - _WAL directories and files_ + - configs + - influxd.bolt + - influxd.sqlite +- /etc/influxdb/ + - config.toml _(influxd configuration file)_ +{{% /filesystem-diagram %}} +{{% /tab-content %}} + + + +{{% tab-content %}} + +#### Windows default paths +| Path | Default | +|:---- |:------- | +| [Engine path](#engine-path) | `%USERPROFILE%\.influxdbv2\engine\` | +| [Bolt path](#bolt-path) | `%USERPROFILE%\.influxdbv2\influxd.bolt` | +| [SQLite path](#sqlite-path) | `%USERPROFILE%\.influxdbv2\influxd.sqlite` | +| [Configs path](#configs-path) | `%USERPROFILE%\.influxdbv2\configs` | + +#### Windows file system overview +{{% filesystem-diagram %}} +- %USERPROFILE%\\.influxdbv2\ + - engine\ + - data\ + - _TSM directories and files_ + - wal\ + - _WAL directories and files_ + - configs + - influxd.bolt + - influxd.sqlite +{{% /filesystem-diagram %}} +{{% /tab-content %}} + + + +{{% tab-content %}} +InfluxDB Docker images are available from both [Dockerhub](https://hub.docker.com/_/influxdb) +and [Quay.io](https://quay.io/repository/influxdb/influxdb?tab=tags). +Each have a unique InfluxDB file system layout. + +- [Dockerhub](#dockerhub) +- [Quay.io](#quayio) + +### Dockerhub + +{{% note %}} +The InfluxDB Dockerhub image uses `/var/lib/influxdb2` instead of `/var/lib/influxdb` +so you can easily mount separate volumes for InfluxDB 1.x and 2.x data during the +[upgrade process](/influxdb/v2.5/upgrade/v1-to-v2/docker/). 
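+
+For example, you can mount separate named volumes at both paths so the 1.x data is
+visible inside the container and the 2.x data persists outside it. This is a sketch;
+the volume names and image tag are illustrative, and the upgrade guide linked above
+covers the full procedure:
+
+```sh
+docker run -p 8086:8086 \
+  -v influxdb-v1-data:/var/lib/influxdb \
+  -v influxdb-v2-data:/var/lib/influxdb2 \
+  influxdb:2.5
+```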
+{{% /note %}} + +#### Dockerhub default paths +| Path | Default | +|:---- |:------- | +| [Engine path](#engine-path) | `/var/lib/influxdb2/engine/` | +| [Bolt path](#bolt-path) | `/var/lib/influxdb2/influxd.bolt` | +| [SQLite path](#sqlite-path) | `/var/lib/influxdb2/influxd.sqlite` | +| [Configs path](#configs-path) | `/etc/influxdb2/configs` | + +#### Dockerhub file system overview +{{% filesystem-diagram %}} +- /var/lib/influxdb2/ + - engine/ + - data/ + - _TSM directories and files_ + - wal/ + - _WAL directories and files_ + - influxd.bolt + - influxd.sqlite +- /etc/influxdb2/ + - configs +{{% /filesystem-diagram %}} + +### Quay.io + +#### Quay default paths +| Path | Default | +|:---- |:------- | +| [Engine path](#engine-path) | `/root/.influxdbv2/engine/` | +| [Bolt path](#bolt-path) | `/root/.influxdbv2/influxd.bolt` | +| [SQLite path](#sqlite-path) | `/root/.influxdbv2/influxd.sqlite` | +| [Configs path](#configs-path) | `/root/.influxdbv2/configs` | + +#### Quay file system overview +{{% filesystem-diagram %}} +- /root/.influxdbv2/ + - engine/ + - data/ + - _TSM directories and files_ + - wal/ + - _WAL directories and files_ + - configs + - influxd.bolt + - influxd.sqlite +{{% /filesystem-diagram %}} +{{% /tab-content %}} + + + +{{% tab-content %}} +#### Kubernetes default paths +| Path | Default | +|:------------------------------|:------------------------------------| +| [Engine path](#engine-path) | `/var/lib/influxdb2/engine/` | +| [Bolt path](#bolt-path) | `/var/lib/influxdb2/influxd.bolt` | +| [SQLite path](#sqlite-path) | `/var/lib/influxdb2/influxd.sqlite` | +| [Configs path](#configs-path) | `/etc/influxdb2/configs` | + +#### Kubernetes file system overview +{{% filesystem-diagram %}} +- /var/lib/influxdb2/ + - engine/ + - data/ + - _TSM directories and files_ + + + - wal/ + - _WAL directories and files_ + - influxd.bolt + - influxd.sqlite +- /etc/influxdb2/ + - configs +{{% /filesystem-diagram %}} +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +--- + +#### TSM directories and files layout + +TSM directories and files are stored in the `data` directory inside the [engine path](#engine-path). +The diagram below is **relative to the [engine path](#file-system-layout)**. + +{{% filesystem-diagram %}} +- .../data/ + - 000xX00xxXx000x0/ _(bucket ID)_ + - _series/ _(series directory)_ + - 00/ _(internal shard index)_ + - 0000 _(internal shard index file)_ + - autogen + - 0123/ _(shard ID)_ + - index _(index directory)_ + - L0-00000001.tsl _(write-ahead log for the TSI index)_ + - L0-00000001.tsi _(series index)_ + - MANIFEST _(index manifest)_ +{{% /filesystem-diagram %}} + +#### WAL directories and files layout + +WAL directories and files are stored in the `data` directory inside the [engine path](#engine-path). +The diagram below is **relative to the [engine path](#file-system-layout)**. + +{{% filesystem-diagram %}} +- .../wal/ + - 000xX00xxXx000x0/ _(bucket ID)_ + - autogen/ + - 0123/ _(shard ID)_ + - _01234.wal _(WAL file)_ +{{% /filesystem-diagram %}} diff --git a/content/influxdb/v2.5/reference/internals/metrics.md b/content/influxdb/v2.5/reference/internals/metrics.md new file mode 100644 index 000000000..b86d45d00 --- /dev/null +++ b/content/influxdb/v2.5/reference/internals/metrics.md @@ -0,0 +1,1743 @@ +--- +title: InfluxDB OSS metrics +description: > + Get metrics about the workload performance of an InfluxDB OSS instance. 
+menu: + influxdb_2_5_ref: + parent: InfluxDB internals + name: Metrics +influxdb/v2.5/tags: [cpu, memory, metrics, performance, Prometheus, storage, usage] +--- +Get metrics about the workload performance of an InfluxDB OSS instance. + +InfluxDB OSS exposes a `/metrics` endpoint that returns +performance, resource, and usage metrics formatted in the [Prometheus plain-text exposition format](https://prometheus.io/docs/instrumenting/exposition_formats). + +[{{< api-endpoint method="GET" endpoint="http://localhost:8086/metrics" >}}](/influxdb/v2.1/api/#operation/GetMetrics) + +Metrics contain a name, an optional set of key-value pairs, and a value. + +The following descriptors precede each metric: + +- `HELP`: description of the metric +- `TYPE`: [Prometheus metric type](https://prometheus.io/docs/concepts/metric_types/) (`counter`, `gauge`, `histogram`, or `summary`) + +#### Example + +```sh +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.17"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 2.27988488e+08 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 9.68016566648e+11 +``` + +The InfluxDB `/metrics` endpoint returns metrics associated with the following categories: + +- [Boltdb](#boltdb-statistics) +- [Go runtime](#go-runtime-statistics) +- [HTTP API](#http-api-statistics) +- [InfluxDB objects and queries](#influxdb-object-and-query-statistics) + - [QC (query controller)](#qc-query-controller-statistics) +- [InfluxDB services](#influxdb-service-statistics) +- [InfluxDB storage](#influxdb-storage-statistics) +- [InfluxDB tasks](#influxdb-task-statistics) + +## Boltdb statistics + +### Reads total + +Total number of boltdb reads. + +#### Example + +```sh +# HELP boltdb_reads_total Total number of boltdb reads +# TYPE boltdb_reads_total counter +boltdb_reads_total 75129 +``` +### Writes total + +Total number of boltdb writes. + +#### Example + +```sh +# HELP boltdb_writes_total Total number of boltdb writes +# TYPE boltdb_writes_total counter +boltdb_writes_total 201591 +``` + +## Go runtime statistics + +For more detail about Go runtime statistics, see the following: +- [Go diagostics documentation](https://go.dev/doc/diagnostics) +- [Go mstats](https://github.com/golang/go/blob/master/src/runtime/mstats.go) + +### GC (garbage collection) duration seconds + +Summary of the pause duration of garbage collection cycles. + +#### Example + +```sh +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 5.1467e-05 +-- +``` + +### Goroutines + +Number of goroutines that currently exist. + +#### Example + +```sh +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 1566 +``` + +### Info + +Information about the Go environment. + +#### Example + +```sh +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.17"} 1 +``` + +### Memory allocated bytes + +Number of bytes allocated and still in use. + +#### Example + +```sh +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 2.27988488e+08 +``` + +### Memory allocated bytes total + +Total number of bytes allocated, even if freed. + +#### Example + +```sh +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 9.68016566648e+11 +``` + +### Memory bucket hash system bytes + +Number of bytes used by the profiling bucket hash table. + +#### Example + +```sh +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.0067613e+07 +``` + +### Memory frees total + +Total number of frees. + +#### Example + +```sh +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 1.3774541795e+10 +``` + +### Memory GC (garbage collection) CPU fraction + +Fraction of this program's available CPU time used by the GC since the program started. + +#### Example + +```sh +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE go_memstats_gc_cpu_fraction gauge +go_memstats_gc_cpu_fraction 0.011634918451016558 +``` + +### Memory GC (garbage collection) system bytes + +Number of bytes used for garbage collection system metadata. + +#### Example + +```sh +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 4.63048016e+08 +``` + +### Memory heap allocated bytes + +Number of heap bytes allocated and still in use. + +#### Example + +```sh +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 2.27988488e+08 +``` + +### Memory heap idle bytes + +Number of heap bytes waiting to be used. + +#### Example + +```sh +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 1.0918273024e+10 +``` + +### Memory heap in use bytes + +Number of heap bytes that are in use. + +#### Example + +```sh +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 3.5975168e+08 +``` + +### Memory heap objects + +Number of allocated objects. +_Allocated_ heap objects include all reachable objects, as +well as unreachable objects that the garbage collector has not yet freed. + +#### Example + +```sh +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 2.404017e+06 +``` + +### Memory heap released bytes + +Number of heap bytes released to the OS. + +#### Example + +```sh +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 2.095038464e+09 +``` + +### Memory heap system bytes + +Number of heap bytes obtained from the system. + +#### Example + +```sh +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 1.1278024704e+10 +``` + +### Memory last GC (garbage collection) time seconds + +Number of seconds since 1970 of the last garbage collection. 
+ +#### Example + +```sh +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.64217120199452e+09 +``` + +### Memory lookups total + +Total number of pointer lookups. + +#### Example + +```sh +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +``` + +### Memory allocations total + +Cumulative count of heap objects allocated. + +#### Example + +```sh +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 1.3776945812e+10 +``` + +### Memory mcache in use bytes + +Number of bytes in use by mcache structures. + +#### Example + +```sh +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +``` + +### Memory mcache system bytes + +Number of bytes used for mcache structures obtained from system. + +#### Example + +```sh +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +``` + +### Memory mspan in use bytes + +Number of bytes of allocated mspan structures. + +#### Example + +```sh +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 4.199e+06 +``` + +### Memory mspan system bytes + +Bytes of memory obtained from the OS for mspan structures. + +#### Example + +```sh +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 1.65609472e+08 +``` + +### Memory next GC (garbage collection) bytes + +Number of heap bytes when next garbage collection will take place. + +#### Example + +```sh +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 4.45628016e+08 +``` + +### Memory other system bytes + +Number of bytes used for other system allocations. + +#### Example + +```sh +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 8.1917722e+07 +``` + +### Memory stack in use bytes + +Number of bytes in use by the stack allocator. + +#### Example + +```sh +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 8.84736e+06 +``` + +### Memory stack system bytes + +Number of bytes obtained from system for stack allocator. + +#### Example + +```sh +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 8.84736e+06 +``` + +### Memory system bytes + +Number of bytes obtained from system. + +#### Example + +```sh +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 1.2007531271e+10 +``` + +### Threads + +Number of OS threads created. + +#### Example + +```sh +# HELP go_threads Number of OS threads created. 
+# TYPE go_threads gauge +go_threads 27 +``` + +## HTTP API statistics + +### API request duration seconds + +How long InfluxDB took to respond to the HTTP request. + +#### Example + +```sh +# HELP http_api_request_duration_seconds Time taken to respond to HTTP request +# TYPE http_api_request_duration_seconds histogram +http_api_request_duration_seconds_bucket{handler="platform",method="DELETE",path="/api/v2/authorizations/:id",response_code="204",status="2XX",user_agent="Chrome",le="0.005"} 0 +-- +``` + +### API requests total + +Number of HTTP requests received. + +#### Example + +```sh +# HELP http_api_requests_total Number of http requests received +# TYPE http_api_requests_total counter +http_api_requests_total{handler="platform",method="DELETE",path="/api/v2/authorizations/:id",response_code="204",status="2XX",user_agent="Chrome"} 1 +-- +``` + +### Query request bytes + +Count of bytes received. + +#### Example + +```sh +# HELP http_query_request_bytes Count of bytes received +# TYPE http_query_request_bytes counter +http_query_request_bytes{endpoint="/api/v2/query",org_id="48c88459ee424a04",status="200"} 727 +``` + +### Query request count + +Total number of query requests. + +#### Example + +```sh +# HELP http_query_request_count Total number of query requests +# TYPE http_query_request_count counter +http_query_request_count{endpoint="/api/v2/query",org_id="48c88459ee424a04",status="200"} 2 +``` + +### Query response bytes + +Count of bytes returned by the query endpoint. + +#### Example + +```sh +# HELP http_query_response_bytes Count of bytes returned +# TYPE http_query_response_bytes counter +http_query_response_bytes{endpoint="/api/v2/query",org_id="48c88459ee424a04",status="200"} 103 +``` + +## InfluxDB object and query statistics + +### Buckets total + +Total number of buckets on the server. + +#### Example + +```sh +# HELP influxdb_buckets_total Number of total buckets on the server +# TYPE influxdb_buckets_total counter +influxdb_buckets_total 9 +``` + +### Dashboards total + +Total number of dashboards on the server. + +#### Example + +```sh +# HELP influxdb_dashboards_total Number of total dashboards on the server +# TYPE influxdb_dashboards_total counter +influxdb_dashboards_total 2 +``` + +### Info + +Information about the InfluxDB environment. + +#### Example + +```sh +# HELP influxdb_info Information about the influxdb environment. +# TYPE influxdb_info gauge +influxdb_info{arch="amd64",build_date="2021-12-28T22:12:40Z",commit="657e1839de",cpus="8",os="darwin",version="v2.1.1"} 1 +``` + +### Organizations total + +Total number of organizations on the server. + +#### Example + +```sh +# HELP influxdb_organizations_total Number of total organizations on the server +# TYPE influxdb_organizations_total counter +influxdb_organizations_total 2 +``` + +### Scrapers total + +Total number of scrapers on the server. + +#### Example + +```sh +# HELP influxdb_scrapers_total Number of total scrapers on the server +# TYPE influxdb_scrapers_total counter +influxdb_scrapers_total 0 +``` + +### Telegrafs total + +Total number of Telegraf configurations on the server. + +#### Example + +```sh +# HELP influxdb_telegrafs_total Number of total telegraf configurations on the server +# TYPE influxdb_telegrafs_total counter +influxdb_telegrafs_total 0 +``` + +### Token services total + +Total number of API tokens on the server. 
+ +#### Example + +```sh +# HELP influxdb_tokens_total Number of total tokens on the server +# TYPE influxdb_tokens_total counter +influxdb_tokens_total 23 +``` + +### Uptime seconds + +InfluxDB process uptime in seconds. + +#### Example + +```sh +# HELP influxdb_uptime_seconds influxdb process uptime in seconds +# TYPE influxdb_uptime_seconds gauge +influxdb_uptime_seconds{id="077238f9ca108000"} 343354.914499305 +``` + +### Users total + +Total number of users on the server. + +#### Example + +```sh +# HELP influxdb_users_total Number of total users on the server +# TYPE influxdb_users_total counter +influxdb_users_total 84 +``` + +## QC (query controller) statistics + +### All active + +Number of queries in all states. + +#### Example + +```sh +# HELP qc_all_active Number of queries in all states +# TYPE qc_all_active gauge +qc_all_active{org="48c88459ee424a04"} 0 +-- +``` + +### All duration seconds + +Total time spent in all query states. + +#### Example + +```sh +# HELP qc_all_duration_seconds Histogram of total times spent in all query states +# TYPE qc_all_duration_seconds histogram +qc_all_duration_seconds_bucket{org="48c88459ee424a04",le="0.001"} 0 +-- +``` + +### Compiling active + +Number of queries actively compiling. + +#### Example + +```sh +# HELP qc_compiling_active Number of queries actively compiling +# TYPE qc_compiling_active gauge +qc_compiling_active{compiler_type="ast",org="ed32b47572a0137b"} 0 +-- +``` + +### Compiling duration seconds + +Histogram of times spent compiling queries. + +#### Example + +```sh +# HELP qc_compiling_duration_seconds Histogram of times spent compiling queries +# TYPE qc_compiling_duration_seconds histogram +qc_compiling_duration_seconds_bucket{compiler_type="ast",org="ed32b47572a0137b",le="0.001"} 999 +-- +``` + +### Executing active + +Number of queries actively executing. + +#### Example + +```sh +# HELP qc_executing_active Number of queries actively executing +# TYPE qc_executing_active gauge +qc_executing_active{org="48c88459ee424a04"} 0 +-- +``` + +### Executing duration seconds + +Histogram of times spent executing queries. + +#### Example + +```sh +# HELP qc_executing_duration_seconds Histogram of times spent executing queries +# TYPE qc_executing_duration_seconds histogram +qc_executing_duration_seconds_bucket{org="48c88459ee424a04",le="0.001"} 0 +-- +``` + +### Memory unused bytes + +Free memory as seen by the internal memory manager. + +#### Example + +```sh +# HELP qc_memory_unused_bytes The free memory as seen by the internal memory manager +# TYPE qc_memory_unused_bytes gauge +qc_memory_unused_bytes{org="48c88459ee424a04"} 0 +-- +``` + +### Queueing active + +Number of queries actively queueing. + +#### Example + +```sh +# HELP qc_queueing_active Number of queries actively queueing +# TYPE qc_queueing_active gauge +qc_queueing_active{org="48c88459ee424a04"} 0 +-- +``` + +### Queueing duration seconds + +Histogram of times spent queueing queries. + +#### Example + +```sh +# HELP qc_queueing_duration_seconds Histogram of times spent queueing queries +# TYPE qc_queueing_duration_seconds histogram +qc_queueing_duration_seconds_bucket{org="48c88459ee424a04",le="0.001"} 2 +-- +``` + +### Requests total + +Count of the query requests. + +#### Example + +```sh +# HELP qc_requests_total Count of the query requests +# TYPE qc_requests_total counter +qc_requests_total{org="48c88459ee424a04",result="success"} 2 +-- +``` + +### Read request duration + +Histogram of times spent in read requests. 
+ +#### Example + +```sh +# HELP query_influxdb_source_read_request_duration_seconds Histogram of times spent in read requests +# TYPE query_influxdb_source_read_request_duration_seconds histogram +query_influxdb_source_read_request_duration_seconds_bucket{op="readTagKeys",org="48c88459ee424a04",le="0.001"} 0 +-- +``` + +## InfluxDB service statistics + +### Bucket service new call total + +Number of calls to the bucket creation service. + +#### Example + +```sh +# HELP service_bucket_new_call_total Number of calls +# TYPE service_bucket_new_call_total counter +service_bucket_new_call_total{method="find_bucket"} 6177 +-- +``` + +### Bucket service new duration + +Duration of calls to the bucket creation service. + +#### Example + +```sh +# HELP service_bucket_new_duration Duration of calls +# TYPE service_bucket_new_duration histogram +service_bucket_new_duration_bucket{method="find_bucket",le="0.005"} 5876 +-- +``` + +### Bucket service new error total + +Number of errors encountered by the bucket creation service. + +#### Example + +```sh +# HELP service_bucket_new_error_total Number of errors encountered +# TYPE service_bucket_new_error_total counter +service_bucket_new_error_total{code="not found",method="find_bucket_by_id"} 76 +``` + +### Onboard service new call total + +Number of calls to the onboarding service. + +#### Example + +```sh +# HELP service_onboard_new_call_total Number of calls +# TYPE service_onboard_new_call_total counter +service_onboard_new_call_total{method="is_onboarding"} 11 +``` + +### Onboard service new duration + +Duration of calls to the onboarding service. + +#### Example + +```sh +# HELP service_onboard_new_duration Duration of calls +# TYPE service_onboard_new_duration histogram +service_onboard_new_duration_bucket{method="is_onboarding",le="0.005"} 11 +-- +``` + +### Organization service call total + +Number of calls to the organization service. + +#### Example + +```sh +# HELP service_org_call_total Number of calls +# TYPE service_org_call_total counter +service_org_call_total{method="find_labels_for_resource"} 10 +``` + +### Organization service duration + +Duration of calls to the organization service. + +#### Example + +```sh +# HELP service_org_duration Duration of calls +# TYPE service_org_duration histogram +service_org_duration_bucket{method="find_labels_for_resource",le="0.005"} 10 +-- +``` + +### Organization service new call total + +Number of calls to the organization creation service. + +#### Example + +```sh +# HELP service_org_new_call_total Number of calls +# TYPE service_org_new_call_total counter +service_org_new_call_total{method="find_org"} 1572 +-- +``` + +### Organization service new duration + +Duration of calls to the organization creation service. + +#### Example + +```sh +# HELP service_org_new_duration Duration of calls +# TYPE service_org_new_duration histogram +service_org_new_duration_bucket{method="find_org",le="0.005"} 1475 +-- +``` + +### Organization service new error total + +Number of errors encountered by the organization creation service. + +#### Example + +```sh +# HELP service_org_new_error_total Number of errors encountered +# TYPE service_org_new_error_total counter +service_org_new_error_total{code="not found",method="find_orgs"} 1 +``` + +### Password service new call total + +Number of calls to the password creation service. 
+ +#### Example + +```sh +# HELP service_password_new_call_total Number of calls +# TYPE service_password_new_call_total counter +service_password_new_call_total{method="compare_password"} 4 +``` + +### Password service new duration + +Duration of calls to the password creation service. + +#### Example + +```sh +# HELP service_password_new_duration Duration of calls +# TYPE service_password_new_duration histogram +service_password_new_duration_bucket{method="compare_password",le="0.005"} 0 +-- +``` + +### Password service new error total + +Number of errors encountered by the password creation service. + +#### Example + +```sh +# HELP service_password_new_error_total Number of errors encountered +# TYPE service_password_new_error_total counter +service_password_new_error_total{code="forbidden",method="compare_password"} 1 +``` + +### Pkger service call total + +Number of calls to the `pkger` service. + +#### Example + +```sh +# HELP service_pkger_call_total Number of calls +# TYPE service_pkger_call_total counter +service_pkger_call_total{method="export"} 3 +``` + +### Pkger service duration + +Duration of calls to the `pkger` service. + +#### Example + +```sh +# HELP service_pkger_duration Duration of calls +# TYPE service_pkger_duration histogram +service_pkger_duration_bucket{method="export",le="0.005"} 0 +-- +``` + +### Pkger service template export + +Metrics for exported resources. + +#### Example + +```sh +# HELP service_pkger_template_export Metrics for resources being exported +# TYPE service_pkger_template_export counter +service_pkger_template_export{buckets="0",by_stack="false",checks="0",dashboards="0",endpoints="0",label_mappings="0",labels="0",method="export",num_org_ids="1",rules="0",tasks="0",telegraf_configs="0",variables="0"} 3 +``` + +### Session service call total + +Number of calls to the session service. + +#### Example + +```sh +# HELP service_session_call_total Number of calls +# TYPE service_session_call_total counter +service_session_call_total{method="create_session"} 3 +-- +``` + +### Session service duration + +Duration of calls to the session service. + +#### Example + +```sh +# HELP service_session_duration Duration of calls +# TYPE service_session_duration histogram +service_session_duration_bucket{method="create_session",le="0.005"} 3 +-- +``` + +### Session service error total + +Number of errors encountered by the session service. + +#### Example + +```sh +# HELP service_session_error_total Number of errors encountered +# TYPE service_session_error_total counter +service_session_error_total{code="not found",method="find_session"} 4 +``` + +### Token service call total + +Number of calls to the token service. + +#### Example + +```sh +# HELP service_token_call_total Number of calls +# TYPE service_token_call_total counter +service_token_call_total{method="delete_authorization"} 3 +-- +``` + +### Token service duration + +Duration of calls to the token service. + +#### Example + +```sh +# HELP service_token_duration Duration of calls +# TYPE service_token_duration histogram +service_token_duration_bucket{method="delete_authorization",le="0.005"} 1 +-- +``` + +### Token service error total + +Number of errors encountered by the token service. + +#### Example + +```sh +# HELP service_token_error_total Number of errors encountered +# TYPE service_token_error_total counter +service_token_error_total{code="not found",method="delete_authorization"} 1 +``` + +### URM new call total +Number of calls to the URM (unified resource management) creation service. 
+ +#### Example + +```sh +# HELP service_urm_new_call_total Number of calls +# TYPE service_urm_new_call_total counter +service_urm_new_call_total{method="find_urms"} 6451 +``` + +### urm new duration +Duration of calls to the URM creation service. + +#### Example + +```sh +# HELP service_urm_new_duration Duration of calls +# TYPE service_urm_new_duration histogram +service_urm_new_duration_bucket{method="find_urms",le="0.005"} 6198 +-- +``` + +### User new call total +Number of calls to the user creation service. + +#### Example + +```sh +# HELP service_user_new_call_total Number of calls +# TYPE service_user_new_call_total counter +service_user_new_call_total{method="find_permission_for_user"} 4806 +-- +``` + +### User new duration +Duration of calls to the user creation service. + +#### Example + +```sh +# HELP service_user_new_duration Duration of calls +# TYPE service_user_new_duration histogram +service_user_new_duration_bucket{method="find_permission_for_user",le="0.005"} 4039 +-- +``` + +## InfluxDB storage statistics + +To learn how InfluxDB writes, stores, and caches data, see [InfluxDB storage engine](/influxdb/v2.5/reference/internals/storage-engine/). + +### Bucket measurement number + +Number of measurements in a bucket. + +#### Example + +```sh +# HELP storage_bucket_measurement_num Gauge of measurement cardinality per bucket +# TYPE storage_bucket_measurement_num gauge +storage_bucket_measurement_num{bucket="0c3dd7d2d97f4b23"} 4 +-- +``` + +### Bucket series number + +Number of series in a bucket. + +#### Example + +```sh +# HELP storage_bucket_series_num Gauge of series cardinality per bucket +# TYPE storage_bucket_series_num gauge +storage_bucket_series_num{bucket="0c3dd7d2d97f4b23"} 38 +-- +``` + +### Cache disk bytes + +Size (in bytes) of the most recent [snapshot](/influxdb/v2.5/reference/internals/storage-engine/#cache). + +#### Example + +```sh +# HELP storage_cache_disk_bytes Gauge of size of most recent snapshot +# TYPE storage_cache_disk_bytes gauge +storage_cache_disk_bytes{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Cache in use bytes + +Current memory consumption (in bytes) of the [cache](/influxdb/v2.5/reference/internals/storage-engine/#cache). + +#### Example + +```sh +# HELP storage_cache_inuse_bytes Gauge of current memory consumption of cache +# TYPE storage_cache_inuse_bytes gauge +storage_cache_inuse_bytes{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Cache latest snapshot + +[Unix time](/influxdb/v2.5/reference/glossary/#unix-timestamp) of the most recent [snapshot](/influxdb/v2.5/reference/internals/storage-engine/#cache). + +#### Example + +```sh +# HELP storage_cache_latest_snapshot Unix time of most recent snapshot +# TYPE storage_cache_latest_snapshot gauge +storage_cache_latest_snapshot{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 1.644269658196893e+09 +-- +``` + +### Cache writes with dropped points + +Cumulative number of [cached](/influxdb/v2.5/reference/internals/storage-engine/#cache) writes that had [rejected points](/influxdb/v2.5/reference/glossary/#rejected-point). 
Writes with rejected points also increment the [write errors counter (`storage_cache_writes_err`)](#cache-writes-failed). + +#### Example + +```sh +# HELP storage_cache_writes_dropped Counter of writes to cache with some dropped points +# TYPE storage_cache_writes_dropped counter +storage_cache_writes_dropped{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Cache writes failed + +Cumulative number of [cached](/influxdb/v2.5/reference/internals/storage-engine/#cache) writes that [failed](/influxdb/v2.5/write-data/troubleshoot/#troubleshoot-failures), inclusive of [cache writes with dropped points (`storage_cache_writes_dropped`)](#cache-writes-with-dropped-points). + +#### Example + +```sh +# HELP storage_cache_writes_err Counter of failed writes to cache +# TYPE storage_cache_writes_err counter +storage_cache_writes_err{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Cache writes total + +Cumulative number of writes to [cache](/influxdb/v2.5/reference/internals/storage-engine/#cache). + +#### Example + +```sh +# HELP storage_cache_writes_total Counter of all writes to cache +# TYPE storage_cache_writes_total counter +storage_cache_writes_total{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Compactions active + +Currently running [TSM](/influxdb/v2.5/reference/internals/storage-engine/#time-structured-merge-tree-tsm) compactions (by level). + +#### Example + +```sh +# HELP storage_compactions_active Gauge of compactions (by level) currently running +# TYPE storage_compactions_active gauge +storage_compactions_active{bucket="ec3f82d1de90eddf",engine="tsm1",id="565",level="1",path="/Users/me/.influxdbv2/engine/data/ec3f82d1de90eddf/autogen/565",walPath="/Users/me/.influxdbv2/engine/wal/ec3f82d1de90eddf/autogen/565"} 0 +-- +``` + +### Compactions since startup + +[TSM](/influxdb/v2.5/reference/internals/storage-engine/#time-structured-merge-tree-tsm) compactions (by level) since startup. 
+ +#### Example + +```sh +# HELP storage_compactions_duration_seconds Histogram of compactions by level since startup +# TYPE storage_compactions_duration_seconds histogram +storage_compactions_duration_seconds_bucket{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="cache",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567",le="60"} 1 +storage_compactions_duration_seconds_bucket{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="cache",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567",le="600"} 1 +storage_compactions_duration_seconds_bucket{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="cache",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567",le="6000"} 1 +storage_compactions_duration_seconds_bucket{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="cache",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567",le="+Inf"} 1 +storage_compactions_duration_seconds_sum{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="cache",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567"} 0.167250668 +storage_compactions_duration_seconds_count{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="cache",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567"} 1 +-- +``` + +### Compactions failed + +Failed [TSM](/influxdb/v2.5/reference/internals/storage-engine/#time-structured-merge-tree-tsm) compactions (by level). + +#### Example + +```sh +# HELP storage_compactions_failed Counter of TSM compactions (by level) that have failed due to error +# TYPE storage_compactions_failed counter +storage_compactions_failed{bucket="ec3f82d1de90eddf",engine="tsm1",id="565",level="1",path="/Users/me/.influxdbv2/engine/data/ec3f82d1de90eddf/autogen/565",walPath="/Users/me/.influxdbv2/engine/wal/ec3f82d1de90eddf/autogen/565"} 0 +-- +``` + +### Compactions queued + +Queued [TSM](/influxdb/v2.5/reference/internals/storage-engine/#time-structured-merge-tree-tsm) compactions (by level). + +#### Example + +```sh +# HELP storage_compactions_queued Counter of TSM compactions (by level) that are currently queued +# TYPE storage_compactions_queued gauge +storage_compactions_queued{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="567",level="1",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/567",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/567"} 0 +-- +``` + +### Retention check duration + +Retention policy check duration (in seconds). 
+ +#### Example + +```sh +# HELP storage_retention_check_duration Histogram of duration of retention check (in seconds) +# TYPE storage_retention_check_duration histogram +storage_retention_check_duration_bucket{le="0.005"} 1 +storage_retention_check_duration_bucket{le="0.01"} 1 +storage_retention_check_duration_bucket{le="0.025"} 1 +storage_retention_check_duration_bucket{le="0.05"} 1 +storage_retention_check_duration_bucket{le="0.1"} 1 +storage_retention_check_duration_bucket{le="0.25"} 1 +storage_retention_check_duration_bucket{le="0.5"} 1 +storage_retention_check_duration_bucket{le="1"} 1 +storage_retention_check_duration_bucket{le="2.5"} 1 +storage_retention_check_duration_bucket{le="5"} 1 +storage_retention_check_duration_bucket{le="10"} 1 +storage_retention_check_duration_bucket{le="+Inf"} 1 +storage_retention_check_duration_sum 0.000351857 +storage_retention_check_duration_count 1 +-- +``` + +### Shard disk size + +Disk size (in bytes) of the [shard](/influxdb/v2.5/reference/internals/shards/). + +#### Example + +```sh +# HELP storage_shard_disk_size Gauge of the disk size for the shard +# TYPE storage_shard_disk_size gauge +storage_shard_disk_size{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 4.188743e+06 +-- +``` + +### Shard fields created + +Number of [shard](/influxdb/v2.5/reference/internals/shards/) fields created. + +#### Example + +```sh +# HELP storage_shard_fields_created Counter of the number of fields created +# TYPE storage_shard_fields_created counter +storage_shard_fields_created{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Shard series + +Number of series in the [shard](/influxdb/v2.5/reference/internals/shards/) index. + +#### Example + +```sh +# HELP storage_shard_series Gauge of the number of series in the shard index +# TYPE storage_shard_series gauge +storage_shard_series{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 38 +-- +``` + +### Shard writes + +Number of [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) requests. + +#### Example + +```sh +# HELP storage_shard_write_count Count of the number of write requests +# TYPE storage_shard_write_count counter +storage_shard_write_count{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Shard dropped points + +Number of [rejected points](/influxdb/v2.5/reference/glossary/#rejected-point) in [shard writes](/influxdb/v2.5/reference/internals/shards/#shard-writes). + +#### Example + +```sh +# HELP storage_shard_write_dropped_sum Counter of the number of points dropped +# TYPE storage_shard_write_dropped_sum counter +storage_shard_write_dropped_sum{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Shard writes with errors + +Number of [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) requests with errors. 
+ +#### Example + +```sh +# HELP storage_shard_write_err_count Count of the number of write requests with errors +# TYPE storage_shard_write_err_count counter +storage_shard_write_err_count{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Points in shard writes with errors + +Number of points in [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) requests with errors. + +#### Example + +```sh +# HELP storage_shard_write_err_sum Counter of the number of points for write requests with errors +# TYPE storage_shard_write_err_sum counter +storage_shard_write_err_sum{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Points in shard writes + +Number of points in [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) requests. + +#### Example + +```sh +# HELP storage_shard_write_sum Counter of the number of points for write requests +# TYPE storage_shard_write_sum counter +storage_shard_write_sum{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +### Shard data size + +Gauge of the data size (in bytes) for each [shard](/influxdb/v2.5/reference/internals/shards/). + +#### Example + +```sh +# HELP storage_tsm_files_disk_bytes Gauge of data size in bytes for each shard +# TYPE storage_tsm_files_disk_bytes gauge +storage_tsm_files_disk_bytes{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 4.188743e+06 +-- +``` + +### Shard files + +Number of files per [shard](/influxdb/v2.5/reference/internals/shards/). + +#### Example + +```sh +# HELP storage_tsm_files_total Gauge of number of files per shard +# TYPE storage_tsm_files_total gauge +storage_tsm_files_total{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 1 +-- +``` + +### WAL size + +[WAL](/influxdb/v2.5/reference/internals/storage-engine/#write-ahead-log-wal) size (in bytes). + +#### Example + +```sh +# HELP storage_wal_size Gauge of size of WAL in bytes +# TYPE storage_wal_size gauge +storage_wal_size{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +## WAL write attempts + +Cumulative number of write attempts to the [WAL](/influxdb/v2.5/reference/internals/storage-engine/#write-ahead-log-wal). + +#### Example + +```sh +# HELP storage_wal_writes Number of write attempts to the WAL +# TYPE storage_wal_writes counter +storage_wal_writes{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +## WAL failed write attempts + +Cumulative number of failed write attempts to the [WAL](/influxdb/v2.5/reference/internals/storage-engine/#write-ahead-log-wal). 
+ +#### Example + +```sh +# HELP storage_wal_writes_err Number of failed write attempts to the WAL +# TYPE storage_wal_writes_err counter +storage_wal_writes_err{bucket="0c3dd7d2d97f4b23",engine="tsm1",id="561",path="/Users/me/.influxdbv2/engine/data/0c3dd7d2d97f4b23/autogen/561",walPath="/Users/me/.influxdbv2/engine/wal/0c3dd7d2d97f4b23/autogen/561"} 0 +-- +``` + +## Points dropped due to partial writes + +Number of points dropped due to partial writes. + +#### Example + +```sh +# HELP storage_writer_dropped_points Histogram of number of points dropped due to partial writes +# TYPE storage_writer_dropped_points histogram +storage_writer_dropped_points_bucket{path="/Users/me/.influxdbv2/engine",le="10"} 0 +storage_writer_dropped_points_bucket{path="/Users/me/.influxdbv2/engine",le="100"} 0 +storage_writer_dropped_points_bucket{path="/Users/me/.influxdbv2/engine",le="1000"} 0 +storage_writer_dropped_points_bucket{path="/Users/me/.influxdbv2/engine",le="10000"} 0 +storage_writer_dropped_points_bucket{path="/Users/me/.influxdbv2/engine",le="100000"} 0 +storage_writer_dropped_points_bucket{path="/Users/me/.influxdbv2/engine",le="+Inf"} 0 +storage_writer_dropped_points_sum{path="/Users/me/.influxdbv2/engine"} 0 +storage_writer_dropped_points_count{path="/Users/me/.influxdbv2/engine"} 0 +-- +``` + +## Points in shard write requests with errors + +Number of points in [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) requests with errors. + +#### Example + +```sh +# HELP storage_writer_err_points Histogram of number of points in errored shard write requests +# TYPE storage_writer_err_points histogram +storage_writer_err_points_bucket{path="/Users/me/.influxdbv2/engine",le="10"} 0 +storage_writer_err_points_bucket{path="/Users/me/.influxdbv2/engine",le="100"} 0 +storage_writer_err_points_bucket{path="/Users/me/.influxdbv2/engine",le="1000"} 0 +storage_writer_err_points_bucket{path="/Users/me/.influxdbv2/engine",le="10000"} 0 +storage_writer_err_points_bucket{path="/Users/me/.influxdbv2/engine",le="100000"} 0 +storage_writer_err_points_bucket{path="/Users/me/.influxdbv2/engine",le="+Inf"} 0 +storage_writer_err_points_sum{path="/Users/me/.influxdbv2/engine"} 0 +storage_writer_err_points_count{path="/Users/me/.influxdbv2/engine"} 0 +-- +``` + +## Points in successful shard write requests + +Number of points in successful [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) requests. + +#### Example + +```sh +# HELP storage_writer_ok_points Histogram of number of points in successful shard write requests +# TYPE storage_writer_ok_points histogram +storage_writer_ok_points_bucket{path="/Users/me/.influxdbv2/engine",le="10"} 6 +storage_writer_ok_points_bucket{path="/Users/me/.influxdbv2/engine",le="100"} 6 +storage_writer_ok_points_bucket{path="/Users/me/.influxdbv2/engine",le="1000"} 8 +storage_writer_ok_points_bucket{path="/Users/me/.influxdbv2/engine",le="10000"} 20 +storage_writer_ok_points_bucket{path="/Users/me/.influxdbv2/engine",le="100000"} 24 +storage_writer_ok_points_bucket{path="/Users/me/.influxdbv2/engine",le="+Inf"} 24 +storage_writer_ok_points_sum{path="/Users/me/.influxdbv2/engine"} 125787 +storage_writer_ok_points_count{path="/Users/me/.influxdbv2/engine"} 24 +-- +``` + +## Points in write requests + +Number of points in write requests. 
+ +#### Example + +```sh +# HELP storage_writer_req_points Histogram of number of points requested to be written +# TYPE storage_writer_req_points histogram +storage_writer_req_points_bucket{path="/Users/me/.influxdbv2/engine",le="10"} 6 +storage_writer_req_points_bucket{path="/Users/me/.influxdbv2/engine",le="100"} 6 +storage_writer_req_points_bucket{path="/Users/me/.influxdbv2/engine",le="1000"} 6 +storage_writer_req_points_bucket{path="/Users/me/.influxdbv2/engine",le="10000"} 14 +storage_writer_req_points_bucket{path="/Users/me/.influxdbv2/engine",le="100000"} 18 +storage_writer_req_points_bucket{path="/Users/me/.influxdbv2/engine",le="+Inf"} 18 +storage_writer_req_points_sum{path="/Users/me/.influxdbv2/engine"} 125787 +storage_writer_req_points_count{path="/Users/me/.influxdbv2/engine"} 18 +-- +``` + +## Shard write request timeouts + +Cumulative number of [shard write](/influxdb/v2.5/reference/internals/shards/#shard-writes) request timeouts. + +#### Example + +```sh +# HELP storage_writer_timeouts Number of shard write request timeouts +# TYPE storage_writer_timeouts counter +storage_writer_timeouts{path="/Users/me/.influxdbv2/engine"} 0 +-- +``` + +## InfluxDB task statistics + +### Task executor errors + +Number of errors thrown by the executor with the type of error (ex. Invalid, Internal, etc.) + +#### Example + +```sh +# HELP task_executor_errors_counter The number of errors thrown by the executor with the type of error (ex. Invalid, Internal, etc.) +# TYPE task_executor_errors_counter counter +task_executor_errors_counter{errorType="internal error",task_type="system"} 1183 +-- +``` + +### Task executor promise queue usage + +Percent of the promise queue that is currently full. + +#### Example + +```sh +# HELP task_executor_promise_queue_usage Percent of the promise queue that is currently full +# TYPE task_executor_promise_queue_usage gauge +task_executor_promise_queue_usage 0 +``` + +### Task executor run duration + +Duration (in seconds) between a task run starting and finishing. + +#### Example + +```sh +# HELP task_executor_run_duration The duration in seconds between a run starting and finishing. +# TYPE task_executor_run_duration summary + +task_executor_run_duration{taskID="08017725990f6000",task_type="",quantile="0.5"} 0.865043855 +task_executor_run_duration{taskID="08017725990f6000",task_type="",quantile="0.9"} 0.865043855 +task_executor_run_duration{taskID="08017725990f6000",task_type="",quantile="0.99"} 0.865043855 +task_executor_run_duration_sum{taskID="08017725990f6000",task_type=""} 1.524920552 +task_executor_run_duration_count{taskID="08017725990f6000",task_type=""} 2 +-- +``` + +### Task executor run latency seconds + +Latency between the task run's scheduled start time and the execution time, by task type. 
+ +#### Example + +```sh +# HELP task_executor_run_latency_seconds Records the latency between the time the run was due to run and the time the task started execution, by task type +# TYPE task_executor_run_latency_seconds histogram +task_executor_run_latency_seconds_bucket{task_type="system",le="0.005"} 0 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.01"} 0 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.025"} 0 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.05"} 0 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.1"} 0 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.25"} 2 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.5"} 6 +task_executor_run_latency_seconds_bucket{task_type="system",le="1"} 6 +task_executor_run_latency_seconds_bucket{task_type="system",le="2.5"} 6 +task_executor_run_latency_seconds_bucket{task_type="system",le="5"} 6 +task_executor_run_latency_seconds_bucket{task_type="system",le="10"} 6 +task_executor_run_latency_seconds_bucket{task_type="system",le="+Inf"} 6 +task_executor_run_latency_seconds_sum{task_type="system"} 2.237636 +task_executor_run_latency_seconds_count{task_type="system"} 6 +-- +``` + +### Task executor run queue delta + +Duration (in seconds) between the task run's scheduled start time and the execution time. + +#### Example + +```sh +# HELP task_executor_run_queue_delta The duration in seconds between a run being due to start and actually starting. +# TYPE task_executor_run_queue_delta summary +task_executor_run_queue_delta{taskID="08017725990f6000",task_type="",quantile="0.5"} 0.324742 +task_executor_run_queue_delta{taskID="08017725990f6000",task_type="",quantile="0.9"} 0.324742 +task_executor_run_queue_delta{taskID="08017725990f6000",task_type="",quantile="0.99"} 0.324742 +task_executor_run_queue_delta_sum{taskID="08017725990f6000",task_type=""} 0.674875 +task_executor_run_queue_delta_count{taskID="08017725990f6000",task_type=""} 2 +-- +``` + +### Task executor total runs active + +Number of workers currently running tasks. + +#### Example + +```sh +# HELP task_executor_total_runs_active Total number of workers currently running tasks +# TYPE task_executor_total_runs_active gauge +task_executor_total_runs_active 0 +``` + +### Task executor total runs complete + +Number of task runs completed across all tasks, split out by success or failure. + +#### Example + +```sh +# HELP task_executor_total_runs_complete Total number of runs completed across all tasks, split out by success or failure. +# TYPE task_executor_total_runs_complete counter +task_executor_total_runs_complete{status="failed",task_type="system"} 1384 +task_executor_total_runs_complete{status="success",task_type="system"} 6 +-- +``` + +### Task executor workers busy + +Percent of total available workers that are currently busy. + +#### Example + +```sh +# HELP task_executor_workers_busy Percent of total available workers that are currently busy +# TYPE task_executor_workers_busy gauge +task_executor_workers_busy 0 +``` + +### Task scheduler current execution + +Number of tasks currently being executed. + +#### Example + +```sh +# HELP task_scheduler_current_execution Number of tasks currently being executed +# TYPE task_scheduler_current_execution gauge +task_scheduler_current_execution 128 +``` + +### Task scheduler execute delta + +Duration (in seconds) between a task run starting and finishing. 
+ +#### Example + +```sh +# HELP task_scheduler_execute_delta The duration in seconds between a run starting and finishing. +# TYPE task_scheduler_execute_delta summary +task_scheduler_execute_delta{quantile="0.5"} NaN +-- +``` + +### Task scheduler schedule delay + +Summary of the delay between when a task is scheduled to run and when it is told to execute. + +#### Example + +```sh +# HELP task_scheduler_schedule_delay The duration between when a Item should be scheduled and when it is told to execute. +# TYPE task_scheduler_schedule_delay summary +task_scheduler_schedule_delay{quantile="0.5"} 120.001036 +task_scheduler_schedule_delay{quantile="0.9"} 120.001074 +task_scheduler_schedule_delay{quantile="0.99"} 120.001074 +task_scheduler_schedule_delay_sum 720.0033010000001 +task_scheduler_schedule_delay_count 6 +-- +``` + +### Task scheduler total execute failure + +Number of times a scheduled task execution has failed. + +#### Example + +```sh +# HELP task_scheduler_total_execute_failure Total number of times an execution has failed. +# TYPE task_scheduler_total_execute_failure counter +task_scheduler_total_execute_failure 0 +``` + +### Task scheduler total execution calls + +Number of scheduled executions across all tasks. + +#### Example + +```sh +# HELP task_scheduler_total_execution_calls Total number of executions across all tasks. +# TYPE task_scheduler_total_execution_calls counter +task_scheduler_total_execution_calls 4806 +``` + +### Task scheduler total release calls + +Number of release requests. + +#### Example + +```sh +# HELP task_scheduler_total_release_calls Total number of release requests. +# TYPE task_scheduler_total_release_calls counter +task_scheduler_total_release_calls 0 +``` + +### Task scheduler total schedule calls + +Number of schedule requests. + +#### Example + +```sh +# HELP task_scheduler_total_schedule_calls Total number of schedule requests. +# TYPE task_scheduler_total_schedule_calls counter +task_scheduler_total_schedule_calls 6 +``` + +### Task scheduler total schedule fails + +Number of schedule requests that fail to schedule. + +#### Example + +```sh +# HELP task_scheduler_total_schedule_fails Total number of schedule requests that fail to schedule. +# TYPE task_scheduler_total_schedule_fails counter +task_scheduler_total_schedule_fails 0 +``` diff --git a/content/influxdb/v2.5/reference/internals/runtime.md b/content/influxdb/v2.5/reference/internals/runtime.md new file mode 100644 index 000000000..0ca5f4e6f --- /dev/null +++ b/content/influxdb/v2.5/reference/internals/runtime.md @@ -0,0 +1,353 @@ +--- +title: InfluxDB runtime +description: > + Learn how to collect Go runtime profiling and tracing information to help with InfluxDB performance analysis and debugging. +menu: + influxdb_2_5_ref: + name: Runtime + parent: InfluxDB internals +weight: 103 +influxdb/2.3/tags: [go, internals, performance] +--- + +InfluxDB provides Go runtime profiles, trace, and other information +useful for analyzing and debugging the server runtime execution. + +- [Overview of Go runtime profiles](#overview-of-go-runtime-profiles) +- [Analyze Go runtime profiles](#analyze-go-runtime-profiles) +- [Analyze the Go runtime trace](#analyze-the-go-runtime-trace) +- [View the command line that invoked InfluxDB](#view-the-command-line-that-invoked-influxdb) +- [View runtime configuration](#view-runtime-configuration) + +## Overview of Go runtime profiles + +A **Go runtime profile** is a collection of stack traces showing call sequences that led to instances of a particular event. 
InfluxDB provides profile data for the following events: + +- blocks +- CPU usage +- memory allocation +- mutual exclusion (mutex) +- OS thread creation + +When you send a profile request to InfluxDB, the [Golang runtime pprof package](https://pkg.go.dev/runtime/pprof) samples the events on the runtime to collect stack traces and statistics (e.g., number of bytes of memory for heap allocation events). For some profiles, you can set the number of seconds that InfluxDB will collect profile data. + +Once data collection is complete, InfluxDB returns the profile data. The default response format is a compressed protocol buffer in +[profile.proto](https://github.com/google/pprof/blob/master/proto/profile.proto) format. **profile.proto** files are compatible with the [pprof](https://github.com/google/pprof) and [`go tool pprof`](https://go.dev/blog/pprof) analysis tools. For some profiles, InfluxDB provides an alternative human-readable plain text format with comments that translate to function calls and line numbers, but the `pprof` tools and **profile.proto** format offer the following advantages: + +- Read profiles from disk or HTTP. +- Aggregate and compare multiple profiles of the same type. +- Analyze and filter profile data. +- Generate visualizations and reports. + +## Analyze Go runtime profiles + +Use the `/debug/pprof` InfluxDB endpoints to download all the profiles at once or request them individually. + +- [Get all runtime profiles](#get-all-runtime-profiles) +- [Profile all memory allocations](#profile-all-memory-allocations) +- [Profile blocking operations](#profile-blocking-operations) +- [Profile CPU](#profile-cpu) +- [Profile goroutines](#profile-goroutines) +- [Profile heap memory allocations](#profile-heap-memory-allocations) +- [Profile mutual exclusions](#profile-mutual-exclusions-mutexes) +- [Profile thread creation](#profile-thread-creation) + +### Get all runtime profiles + +To download all runtime profiles at once, use an HTTP client to send a `GET` request to the `/debug/pprof/all` endpoint. `go tool pprof` can't fetch profiles directly from `/debug/pprof/all`. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/all" %}} + +InfluxDB returns a gzipped tar file that contains the following profiles in the **profile.proto** format: + +- `profiles/allocs.pb.gz`: [profile all memory allocations](#profile-all-memory-allocations) +- `profiles/block.pb.gz`: [profile blocking operations](#profile-blocking-operations) +- `profiles/cpu.pb.gz`: _(Optional)_ [profile CPU](#profile-cpu). +- `profiles/goroutine.pb.gz`: [profile goroutines](#profile-goroutines) +- `profiles/heap.pb.gz`: [profile heap memory allocations](#profile-heap-memory-allocations) +- `profiles/mutex.pb.gz`: [profile mutual exclusions](#profile-mutual-exclusions-mutexes) +- `profiles/threadcreate.pb.gz`: [profile thread creation](#profile-thread-creation) + +| Option | Include by | +|:--------|:-----------| +| Profile CPU | Pass a [duration of seconds](/influxdb/v2.5/reference/glossary/#duration) with the `cpu` query parameter in your request URL | + +Use an HTTP client like `curl` or `wget` to download profiles from `/debug/pprof/all`. + +#### Example + +```sh +# Use `curl` to download a `.tar.gz` of all profiles after 10 seconds of CPU sampling. +# Use `tar` to extract the profiles folder. + +curl "http://localhost:8086/debug/pprof/all?cpu=10s" | tar -xz + +# Analyze an extracted profile. 
+ +go tool pprof profiles/heap.pb.gz +``` + +### Profile all memory allocations + +Profiles memory allocations and sets the default profile display to __alloc_space__, +the total number of bytes allocated since the program began (including garbage-collected bytes). + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/allocs" %}} + +| Option | Include by | +|:--------|:-----------| +| Seconds to sample | Pass an [unsigned integer](/influxdb/v2.5/reference/glossary/#unsigned-integer) with the `seconds` query parameter in your request URL | +| Output plain text (mutually exclusive with `seconds`) | Pass `1` with the `debug` query parameter in your request URL | + +```sh +# Analyze the profile in interactive mode. + +go tool pprof http://localhost:8086/debug/pprof/allocs + +# `pprof` returns the following prompt: +# Entering interactive mode (type "help" for commands, "o" for options) +# (pprof) + +# At the prompt, get the top N memory allocations. + +(pprof) top10 +``` + +### Profile blocking operations + +Profiles operations that led to blocking on synchronization primitives and caused Go to suspend goroutine's execution. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/block" %}} + +| Option | Include by | +|:--------|:-----------| +| Output plain text | Pass `1` with the `debug` query parameter in your request URL | + +```sh +# Analyze the profile in interactive mode. + +go tool pprof http://localhost:8086/debug/pprof/block + +# `pprof` returns the following prompt: +# Entering interactive mode (type "help" for commands, "o" for options) +# (pprof) + +# At the prompt, get the top N entries. + +(pprof) top10 +``` + +### Profile CPU + +Profiles program counters sampled from the execution stack. To download the profile, use an HTTP client to send a `GET` request to the `/debug/pprof/profile` endpoint. `go tool pprof` can't fetch the CPU profile directly. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/profile" %}} + +| Option | Include by | +|:--------|:-----------| +| Seconds to sample (default `30`) | Pass an [unsigned integer](/influxdb/v2.5/reference/glossary/#unsigned-integer) with the `seconds` query parameter in your request URL | + +Use an HTTP client like `curl` or `wget` to download the profile. + +#### Example + +```sh +# Get the profile. + +curl http://localhost:8086/debug/pprof/profile -o cpu + +# Analyze the profile in interactive mode. + +go tool pprof ./cpu + +# At the prompt, get the top N functions most often running +# or waiting during the sample period. + +(pprof) top10 +``` + +Use the `seconds` query parameter to control the sampling duration. + +{{% note %}} + +`/debug/pprof/profile?seconds=SECONDS` returns the same CPU profile as `/debug/pprof/all?cpu=DURATION`. + +{{% /note %}} + +#### Example + +```sh +# Get the CPU profile after 10 seconds of sampling. + +curl "http://localhost:8086/debug/pprof/profile?seconds=10" -o cpu + +# Get all profiles after 10 seconds of CPU sampling. + +curl "http://localhost:8086/debug/pprof/all?cpu=10s" -o all.tar.gz +``` + +### Profile goroutines + +Profiles all current goroutines. 
+ +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/goroutine" %}} + +| Option | Include by | +|:--------|:-----------| +| Seconds to sample | Pass an [unsigned integer](/influxdb/v2.5/reference/glossary/#unsigned-integer) with the `seconds` query parameter in your request URL | +| Output plain text (mutually exclusive with `seconds`) | Pass `1` with the `debug` query parameter in your request URL | + +#### Example + +```sh +# Analyze the profile in interactive mode. + +go tool pprof http://localhost:8086/debug/pprof/goroutine + +# `pprof` returns the following prompt: +# Entering interactive mode (type "help" for commands, "o" for options) +# (pprof) + +# At the prompt, get the top N entries. + +(pprof) top10 +``` + +### Profile heap memory allocations + +Profiles heap, or memory allocations for live objects. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/heap" %}} + +| Option | Include by | +|:--------|:-----------| +| Run garbage control before sampling | Pass `1` with the `gc` query parameter in your request URL | +| Seconds to sample | Pass an [unsigned integer](/influxdb/v2.5/reference/glossary/#unsigned-integer) with the `seconds` query parameter in your request URL | +| Output plain text (mutually exclusive with `seconds`) | Pass `1` with the `debug` query parameter in your request URL | + +#### Example + +```sh +# Analyze the profile in interactive mode. + +go tool pprof http://localhost:8086/debug/pprof/heap + +# `pprof` returns the following prompt: +# Entering interactive mode (type "help" for commands, "o" for options) +# (pprof) + +# At the prompt, get the top N memory-intensive nodes. + +(pprof) top10 + +# pprof displays the list: +# Showing nodes accounting for 142.46MB, 85.43% of 166.75MB total +# Dropped 895 nodes (cum <= 0.83MB) +# Showing top 10 nodes out of 143 +``` + +### Profile mutual exclusions (mutexes) + +Profiles holders of contended mutual exclusions (mutexes). + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/mutex" %}} + +| Option | Include by | +|:--------|:-----------| +| Seconds to sample | Pass an [unsigned integer](/influxdb/v2.5/reference/glossary/#unsigned-integer) with the `seconds` query parameter in your request URL | +| Output plain text (mutually exclusive with `seconds`) | Pass `1` with the `debug` query parameter in your request URL | + +#### Example + +```sh +# Analyze the profile in interactive mode. + +go tool pprof http://localhost:8086/debug/pprof/mutex + +# `pprof` returns the following prompt: +# Entering interactive mode (type "help" for commands, "o" for options) +# (pprof) + +# At the prompt, get the top N entries. + +(pprof) top10 +``` + +### Profile thread creation + +Profiles operations that led to the creation of OS threads. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/threadcreate" %}} + +| Option | Include by | +|:--------|:-----------| +| Seconds to sample | Pass an [unsigned integer](/influxdb/v2.5/reference/glossary/#unsigned-integer) with the `seconds` query parameter in your request URL | +| Output plain text (mutually exclusive with `seconds`) | Pass `1` with the `debug` query parameter in your request URL | + +#### Example + +```sh +# Analyze the profile in interactive mode. + +go tool pprof http://localhost:8086/debug/pprof/threadcreate + +# `pprof` returns the following prompt: +# Entering interactive mode (type "help" for commands, "o" for options) +# (pprof) + +# At the prompt, get the top N entries. 
+ +(pprof) top10 +``` + +## Analyze the Go runtime trace + +To trace execution events for InfluxDB, use the `/debug/pprof/trace` +endpoint with `go tool trace`. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/trace" %}} + +#### Example + +```sh +# Download the trace file. + +curl http://localhost:8086/debug/pprof/trace -o trace.out + +# Analyze the trace. + +go tool trace ./trace.out +``` + +#### Generate a pprof-like profile from trace + +You can use `go tool trace` to generate _pprof-like_ profiles from +a trace file and then analyze them with `go tool pprof`. + +#### Example + +```sh +# Generate a profile from the downloaded trace file. + +go tool trace -pprof=PROFILE_TYPE ./trace.out > PROFILE_TYPE.pprof +``` + +Replace *`PROFILE_TYPE`* with one of the following [Golang profile types](https://pkg.go.dev/cmd/trace): + +- `net`: network blocking profile +- `sync`: synchronization blocking profile +- `syscall`: syscall blocking profile +- `sched`: scheduler latency profile + +## View the command line that invoked InfluxDB + +To view the command, arguments, and command-line variables that invoked InfluxDB, use the `/debug/pprof/cmdline` endpoint. + +{{% api-endpoint method="GET" endpoint="http://localhost:8086/debug/pprof/cmdline" %}} + +`/debug/pprof/cmdline` returns the command line invocation in plain text. + +## View runtime configuration + +In InfluxDB v2.3+, you can view your active runtime configuration, including flags and environment variables. +See how to [view your runtime server configuration](/influxdb/v2.5/reference/config-options/#view-your-runtime-server-configuration). diff --git a/content/influxdb/v2.5/reference/internals/shards.md b/content/influxdb/v2.5/reference/internals/shards.md new file mode 100644 index 000000000..82cce5611 --- /dev/null +++ b/content/influxdb/v2.5/reference/internals/shards.md @@ -0,0 +1,160 @@ +--- +title: InfluxDB shards and shard groups +description: > + Learn the relationships between buckets, shards, and shard groups. + InfluxDB organizes time series data into **shards** when storing data to disk. + Shards are grouped into **shard groups**. +menu: + influxdb_2_5_ref: + name: Shards & shard groups + parent: InfluxDB internals +weight: 103 +influxdb/v2.5/tags: [storage, internals] +related: + - /influxdb/v2.5/reference/internals/storage-engine/ + - /influxdb/v2.5/organizations/buckets/ + - /influxdb/v2.5/reference/cli/influx/bucket/ + - /influxdb/v2.5/admin/internals/ +--- + +InfluxDB organizes time series data into **shards** when storing data to disk. +Shards are grouped into **shard groups**. +Learn the relationships between buckets, shards, and shard groups. + +- [Shards](#shards) +- [Shard groups](#shard-groups) + - [Shard group duration](#shard-group-duration) + - [Shard group diagram](#shard-group-diagram) +- [Shard life-cycle](#shard-life-cycle) + - [Shard precreation](#shard-precreation) + - [Shard writes](#shard-writes) + - [Shard compaction](#shard-compaction) +- [Shard deletion](#shard-deletion) + +## Shards +A shard contains encoded and compressed time series data for a given time range +defined by the [shard group duration](#shard-group-duration). +All points in a [series](#series) within the specified shard group duration are stored in the same shard. +A single shard contains multiple series, one or more [TSM files](#tsm-time-structured-merge-tree) on disk, +and belongs to a [shard group](#shard-groups). 
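+
+On disk, each shard typically corresponds to a directory of TSM files named by shard ID. As a minimal sketch (the paths below assume the default OSS engine path of `~/.influxdbv2/engine`, and `BUCKET_ID` and `SHARD_ID` are placeholders; the exact layout may differ on your system):
+
+```sh
+# List shard directories for a bucket (one directory per shard, named by shard ID).
+ls ~/.influxdbv2/engine/data/BUCKET_ID/autogen/
+
+# List the TSM files that belong to a single shard.
+ls ~/.influxdbv2/engine/data/BUCKET_ID/autogen/SHARD_ID/
+```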
+ +## Shard groups +A shard group belongs to an InfluxDB [bucket](/influxdb/v2.5/reference/glossary/#bucket) and contains time series data for a specific time range defined by +the [shard group duration](#shard-group-duration). + +{{% note %}} +In **InfluxDB OSS**, a shard group typically contains only a single shard. +In an **InfluxDB Enterprise 1.x cluster**, shard groups contain multiple shards +distributed across multiple data nodes. +{{% /note %}} + +### Shard group duration +The **shard group duration** specifies the time range for each shard group and determines how often to create a new shard group. +By default, InfluxDB sets the shard group duration according to +the [retention period](/influxdb/v2.5/reference/glossary/#retention-period) +of the bucket: + +| Bucket retention period | Default shard group duration | +|:----------------------- | ----------------------------:| +| less than 2 days | 1h | +| between 2 days and 6 months | 1d | +| greater than 6 months | 7d | + +##### Shard group duration configuration options +To configure a custom bucket shard group duration, use the `--shard-group-duration` +flag with the [`influx bucket create`](/influxdb/v2.5/reference/cli/influx/bucket/create/#create-a-custom-shard-group-duration) +and [`influx bucket update`](/influxdb/v2.5/reference/cli/influx/bucket/update//#update-the-shard-group-duration-of-a-bucket) commands. + +{{% note %}} +Shard group durations must be shorter than the bucket's retention period. +{{% /note %}} + +To view your bucket's shard group duration, use the +[`influx bucket list` command](/influxdb/v2.5/reference/cli/influx/bucket/list/). + +### Shard group diagram +The following diagram represents a **bucket** with a **4d retention period** +and a **1d shard group duration**: + +--- + +{{< html-diagram/shards >}} + +--- + +## Shard life-cycle + +### Shard precreation +The InfluxDB **shard precreation service** pre-creates shards with future start +and end times for each shard group based on the shard group duration. + +The precreator service does not pre-create shards for past time ranges. +When backfilling historical data, InfluxDB creates shards for past time ranges as needed, +resulting in temporarily lower write throughput. + +##### Shard precreation-related configuration settings +- [`storage-shard-precreator-advance-period`](/influxdb/v2.5/reference/config-options/#storage-shard-precreator-advance-period) +- [`storage-shard-precreator-check-interval`](/influxdb/v2.5/reference/config-options/#storage-shard-precreator-check-interval) + +### Shard writes +InfluxDB writes time series data to un-compacted or "hot" shards. +When a shard is no longer actively written to, InfluxDB [compacts](#shard-compaction) shard data, resulting in a "cold" shard. + +Typically, InfluxDB writes data to the most recent shard group, but when backfilling +historical data, InfluxDB writes to older shards that must first be un-compacted. +When the backfill is complete, InfluxDB re-compacts the older shards. + +### Shard compaction + +InfluxDB compacts shards at regular intervals to compress time series data and optimize disk usage. When compactions are enabled, InfluxDB checks to see whether shard compactions are needed every second. If there haven't been writes during the `compact-full-write-cold-duration` period (by default, `4h`), InfluxDB compacts all TSM files. 
Otherwise, InfluxDB groups TSM files into compaction levels (determined by the number of times the files have been compacted), and attempts to combine files and compress them more efficiently.
+
+InfluxDB uses the following compaction levels:
+
+- **Level 0 (L0):** The log file (`LogFile`) is considered level 0 (L0). Once this file exceeds a `5MB` threshold, InfluxDB creates a new active log file, and the previous one begins compacting into an `IndexFile`. This first index file is at level 1 (L1).
+- **Level 1 (L1):** InfluxDB flushes all newly written data held in an in-memory cache to disk into an `IndexFile`.
+- **Level 2 (L2):** InfluxDB compacts up to eight L1-compacted files into one or more L2 files by
+  combining multiple blocks containing the same series into fewer blocks in one or more new files.
+- **Level 3 (L3):** InfluxDB iterates over L2-compacted file blocks (over a certain size)
+  and combines multiple blocks containing the same series into one block in a new file.
+- **Level 4 (L4):** **Full compaction.** InfluxDB iterates over L3-compacted file blocks
+  and combines multiple blocks containing the same series into one block in a new file.
+
+InfluxDB schedules compactions preferentially, using the following guidelines:
+
+- The lower the level (the fewer times the file has been compacted), the more weight is given to compacting the file.
+- The more compactible files in a level, the higher the priority given to compacting that level. If the number of files in each level is equal, lower levels are compacted first.
+- If a higher level has more candidates for compaction, it may be compacted before a lower level. InfluxDB multiplies the number of collection groups (collections of files to compact into a single next-generation file) by a specified weight (0.4, 0.3, 0.2, and 0.1) per level to determine the compaction priority.
+
+##### Shard compaction-related configuration settings
+
+The following configuration settings are especially beneficial for systems with irregular loads, because they limit compactions during periods of high usage and let compactions catch up during periods of lower load:
+
+- [`storage-compact-full-write-cold-duration`](/influxdb/v2.5/reference/config-options/#storage-compact-full-write-cold-duration)
+- [`storage-compact-throughput-burst`](/influxdb/v2.5/reference/config-options/#storage-compact-throughput-burst)
+- [`storage-max-concurrent-compactions`](/influxdb/v2.5/reference/config-options/#storage-max-concurrent-compactions)
+- [`storage-max-index-log-file-size`](/influxdb/v2.5/reference/config-options/#storage-max-index-log-file-size)
+- [`storage-series-file-max-concurrent-snapshot-compactions`](/influxdb/v2.5/reference/config-options/#storage-series-file-max-concurrent-snapshot-compactions)
+
+In systems with stable loads, if compactions interfere with other operations, the system is typically undersized for its load, and configuration changes won't help much.
+
+## Shard deletion
+The InfluxDB **retention enforcement service** routinely checks for shard groups
+older than their bucket's retention period.
+Once the start time of a shard group is beyond the bucket's retention period,
+InfluxDB deletes the shard group and associated shards and TSM files.
+
+In buckets with an infinite retention period, shards remain on disk indefinitely.
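+
+The retention period that drives shard deletion is a property of the bucket. The following is a minimal sketch of how you might inspect and shorten it with the `influx` CLI; the bucket name `example-bucket` and the `BUCKET_ID` placeholder are assumptions for illustration, and the CLI is assumed to be already configured:
+
+```sh
+# View the bucket's current retention period and shard group duration.
+influx bucket list --name example-bucket
+
+# Shorten the retention period to 30 days, identifying the bucket by ID.
+# Shard groups that fall entirely outside the new retention period become
+# candidates for deletion on the next retention check.
+influx bucket update --id BUCKET_ID --retention 30d
+```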
+
+{{% note %}}
+#### InfluxDB only deletes cold shards
+InfluxDB only deletes **cold** shards.
+If backfilling data beyond a bucket's retention period, the backfilled data will
+remain on disk until the following occurs:
+
+1. The shard returns to a cold state.
+2. The retention enforcement service deletes the shard group.
+{{% /note %}}
+
+##### Retention enforcement-related configuration settings
+- [`storage-retention-check-interval`](/influxdb/v2.5/reference/config-options/#storage-retention-check-interval)
diff --git a/content/influxdb/v2.5/reference/internals/storage-engine.md b/content/influxdb/v2.5/reference/internals/storage-engine.md
new file mode 100644
index 000000000..4c4385ec8
--- /dev/null
+++ b/content/influxdb/v2.5/reference/internals/storage-engine.md
@@ -0,0 +1,116 @@
+---
+title: InfluxDB storage engine
+description: >
+  An overview of the InfluxDB storage engine architecture.
+weight: 101
+menu:
+  influxdb_2_5_ref:
+    name: Storage engine
+    parent: InfluxDB internals
+influxdb/v2.5/tags: [storage, internals]
+products: [oss]
+related:
+  - /resources/videos/tsm-engine/
+  - /influxdb/v2.5/admin/internals/
+---
+
+The InfluxDB storage engine ensures that:
+
+- Data is safely written to disk
+- Queried data is returned complete and correct
+- Data is accurate (first) and performant (second)
+
+This document outlines the internal workings of the storage engine.
+This information is presented both as a reference and to aid those looking to maximize performance.
+
+The storage engine includes the following components:
+
+* [Write Ahead Log (WAL)](#write-ahead-log-wal)
+* [Cache](#cache)
+* [Time-Structured Merge Tree (TSM)](#time-structured-merge-tree-tsm)
+* [Time Series Index (TSI)](#time-series-index-tsi)
+
+## Writing data from API to disk
+
+The storage engine handles data from the point an API write request is received through writing data to the physical disk.
+Data is written to InfluxDB using [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) sent via HTTP POST request to the `/api/v2/write` endpoint or the [`/write` 1.x compatibility endpoint](/influxdb/v2.5/reference/api/influxdb-1x/).
+Batches of [points](/influxdb/v2.5/reference/glossary/#point) are sent to InfluxDB, compressed, and written to a WAL for immediate durability.
+Points are also written to an in-memory cache and become immediately queryable.
+The in-memory cache is periodically written to disk in the form of [TSM](#time-structured-merge-tree-tsm) files.
+As TSM files accumulate, the storage engine combines and compacts them into higher-level TSM files.
+
+{{% note %}}
+While points can be sent individually, for efficiency, most applications send points in batches.
+Points in a POST body can be from an arbitrary number of series, measurements, and tag sets.
+Points in a batch do not have to be from the same measurement or tag set.
+{{% /note %}}
+
+## Write Ahead Log (WAL)
+
+The **Write Ahead Log** (WAL) retains InfluxDB data when the storage engine restarts.
+The WAL ensures data is durable in case of an unexpected failure.
+
+When the storage engine receives a write request, the following steps occur:
+
+1. The write request is appended to the end of the WAL file.
+2. Data is written to disk using `fsync()`.
+3. The in-memory cache is updated.
+4. When data is successfully written to disk, a response confirms the write request was successful.
+
+`fsync()` takes the file and pushes pending writes all the way to the disk.
+As a system call, `fsync()` has a kernel context switch that's computationally expensive, but guarantees that data is safe on disk. + +When the storage engine restarts, the WAL file is read back into the in-memory database. +InfluxDB then answers requests to the `/read` endpoint. + +## Cache + +The **cache** is an in-memory copy of data points currently stored in the WAL. +The [WAL](#write-ahead-log-wal) and cache are separate entities and do not interact with each other. The storage engine coordinates writes to both. + +The cache: + +- Organizes points by key (measurement, tag set, and unique field). + Each field is stored in its own time-ordered range. +- Stores uncompressed data. +- Gets updates from the WAL each time the storage engine restarts. + The cache is queried at runtime and merged with the data stored in TSM files. +- Uses a maximum `maxSize` bytes of memory. + +Cache snapshots are cache objects currently being written to TSM files. +They're kept in memory while flushing so they can be queried along with the cache. +Queries to the storage engine merge data from the cache with data from the TSM files. +Queries execute on a copy of the data that is made from the cache at query processing time. +This way writes that come in while a query is running do not affect the result. +Deletes sent to the cache clear the specified key or time range for a specified key. + +## Time-Structured Merge Tree (TSM) + +To efficiently compact and store data, +the storage engine groups field values by series key, and then orders those field values by time. +(A [series key](/influxdb/v2.5/reference/glossary/#series-key) is defined by measurement, tag key and value, and field key.) + +The storage engine uses a **Time-Structured Merge Tree** (TSM) data format. +TSM files store compressed series data in a columnar format. +To improve efficiency, the storage engine only stores differences (or *deltas*) between values in a series. +Column-oriented storage lets the engine read by series key and omit extraneous data. + +After fields are stored safely in TSM files, the WAL is truncated and the cache is cleared. +The **compaction** process creates read-optimized TSM files. The TSM compaction code is quite complex. +However, the high-level goal is quite simple: +organize values for a series together into long runs to best optimize compression and scanning queries. + +For more information on the TSM engine, watch the video below: + +{{< youtube C5sv0CtuMCw >}} + +## Time Series Index (TSI) + +As data cardinality (the number of series) grows, queries read more series keys and become slower. +The **Time Series Index** ensures queries remain fast as data cardinality grows. +The TSI stores series keys grouped by measurement, tag, and field. +This allows the database to answer two questions well: + +- What measurements, tags, fields exist? + (This happens in meta queries.) +- Given a measurement, tags, and fields, what series keys exist? diff --git a/content/influxdb/v2.5/reference/internals/system-buckets.md b/content/influxdb/v2.5/reference/internals/system-buckets.md new file mode 100644 index 000000000..e7aab882b --- /dev/null +++ b/content/influxdb/v2.5/reference/internals/system-buckets.md @@ -0,0 +1,110 @@ +--- +title: InfluxDB system buckets +description: > + InfluxDB system buckets contain time series data used by and generated from the + InfluxDB monitoring and alerting system and the task engine. 
+menu: + influxdb_2_5_ref: + name: System buckets + parent: InfluxDB internals +weight: 103 +influxdb/v2.5/tags: [buckets] +related: + - /influxdb/v2.5/monitor-alert/ + - /influxdb/v2.5/process-data/ + - /{{< latest "flux" >}}/stdlib/influxdata/influxdb/monitor/check/ + - /{{< latest "flux" >}}/stdlib/influxdata/influxdb/monitor/deadman/ + - /{{< latest "flux" >}}/stdlib/influxdata/influxdb/monitor/from/ + - /{{< latest "flux" >}}/stdlib/influxdata/influxdb/monitor/logs/ + - /{{< latest "flux" >}}/stdlib/influxdata/influxdb/monitor/notify/ +--- + +InfluxDB system buckets contain time series data used by and generated from the +InfluxDB monitoring and alerting system and the task engine. + +Each InfluxDB organization includes the following system buckets: + +- [\_monitoring](#_monitoring-system-bucket) +- [\_tasks](#_tasks-system-bucket) + +{{% note %}} +The system bucket schemas below reference **InfluxDB data elements**. +For more information, see [InfluxDB key concepts](/influxdb/v2.5/reference/key-concepts/data-elements/). +{{% /note %}} + +## \_monitoring system bucket +The `_monitoring` system bucket stores InfluxDB data used to +[monitor data and send alerts](/influxdb/v2.5/monitor-alert/). + +**Data retention:** 7 days + +### \_monitoring bucket schema + +- **statuses** _(measurement)_ + - **tags:** + - **\_check\_id:** check ID + - **\_check\_name:** check name + - **\_level:** level evaluated by the check (ok, info, warn, or crit) + - **\_source\_measurement:** original measurement queried by the check + - **\_type:** check type (threshold or deadman) + - _other tags inherited from queried data or added in the check configuration_ + - **fields:** + - **\_message:** message generated by the check + - **\_source_timestamp:** original timestamp of the queried data + - _other fields inherited from queried data_ +- **notifications** _(measurement)_ + - **tags:** + - **\_check\_id:** check ID that triggered the notification + - **\_check\_name:** check name that triggered the notification + - **\_level:** check-evaluated level that triggered the notification (ok, info, warn, or crit) + - **\_notification_endpoint_id:** notification endpoint ID + - **\_notification_endpoint_name:** notification endpoint name + - **\_notification_rule_id:** notification rule ID + - **\_notification_rule_name:** notification rule name + - **\_sent:** sent status (true or false) + - **\_source\_measurement:** original measurement queried by the check + - **\_type:** check type (threshold or deadman) + - _other tags inherited from queried data or added in the check configuration_ + - **fields:** + - **\_message:** message generated by the check + - **\_source_timestamp:** original timestamp of the queried data + - **\_status_timestamp:** timestamp when the status (`_level`) was evaluated + - _other fields inherited from queried data_ +- {{% cloud-only %}} + + **rejected_points** _(measurement)_ + + - **tags:** + - **bucket:** ID of the bucket targeted in the write request + - **reason:** brief description of why InfluxDB rejected the point + - **field:** field name of the point (present if the point contained a field) + - **measurement:** measurement of the point (present if the point contained a measurement) + - **gotType:** InfluxDB field type in the point (present if type mismatch) + - **wantType:** InfluxDB field type in the bucket schema (present if type mismatch) + - **fields:** + - **_field:** `count` (for data type and schema conflicts) or `error` (for parsing errors) + - **_value:** `1` if `_field: 
"count"` or error details if `_field: "error"` + - **timestamp:** time the rejected point was logged + {{% /cloud-only %}} + + +## \_tasks system bucket +The `_tasks` system bucket stores data related to [InfluxDB task](/influxdb/v2.5/process-data/) executions. + +**Data retention:** 3 days + +### \_tasks bucket schema + +_A **task run** refers to a single task execution._ + +- **runs** _(measurement)_ + - **tags:** + - **status:** task run status (success or failed) + - **taskID:** task ID + - **fields:** + - **finishedAt:** timestamp when the task run finished + - **logs:** log output from the task run + - **requestedAt:** timestamp when the task run was requested + - **runID** task run ID + - **scheduledFor:** timestamp the task run was scheduled for + - **startedAt:** timestamp when the task run started diff --git a/content/influxdb/v2.5/reference/key-concepts/_index.md b/content/influxdb/v2.5/reference/key-concepts/_index.md new file mode 100644 index 000000000..30940ea9e --- /dev/null +++ b/content/influxdb/v2.5/reference/key-concepts/_index.md @@ -0,0 +1,22 @@ +--- +title: InfluxDB key concepts +description: > + Concepts related to InfluxDB. +weight: 2 +menu: + influxdb_2_5_ref: + name: Key concepts +influxdb/v2.5/tags: [key concepts] +--- + +Before working with InfluxDB {{< current-version >}}, it's helpful to learn a few key concepts. Browse the topics and videos below to learn more. + +{{< children >}} + +{{< youtube KZwr1xBDbBQ >}} + +{{< youtube rrdtyP4OyGM >}} + +{{< youtube B6qHmtRUEWY >}} + +{{< youtube kHIjROe0ZpA >}} diff --git a/content/influxdb/v2.5/reference/key-concepts/data-elements.md b/content/influxdb/v2.5/reference/key-concepts/data-elements.md new file mode 100644 index 000000000..f140001db --- /dev/null +++ b/content/influxdb/v2.5/reference/key-concepts/data-elements.md @@ -0,0 +1,207 @@ +--- +title: InfluxDB data elements +description: > + InfluxDB structures data using elements such as timestamps, field keys, field values, tags, etc. +weight: 102 +menu: + influxdb_2_5_ref: + parent: Key concepts + name: Data elements +influxdb/v2.5/tags: [key concepts, schema] +related: + - /resources/videos/data-model-building-blocks/ +--- + +InfluxDB {{< current-version >}} includes the following data elements: + +- [timestamp](#timestamp) +- [field key](#field-key) +- [field value](#field-value) +- [field set](#field-set) +- [tag key](#tag-key) +- [tag value](#tag-value) +- [tag set](#tag-set) +- [measurement](#measurement) +- [series](#series) +- [point](#point) +- [bucket](#bucket) +- [bucket schema](#bucket-schema) +- [organization](#organization) + +The sample data below is used to illustrate data elements concepts. +_Hover over highlighted terms to get acquainted with InfluxDB terminology and layout._ + + +**bucket:** `my_bucket` + +| _time | _measurement | {{< tooltip "Tag key" "location" >}} | {{< tooltip "Tag key" "scientist" >}} | _field | _value | +|:------------------- |:------------ |:------- |:------ |:-- |:------ | +| 2019-08-18T00:00:00Z | census | klamath | anderson | bees | 23 | +| 2019-08-18T00:00:00Z | census | portland | mullen | ants | 30 | +| 2019-08-18T00:06:00Z | census | klamath | anderson | bees | 28 | +| {{< tooltip "Timestamp" "2019-08-18T00:06:00Z" >}} | {{< tooltip "measurement" "census" >}} | {{< tooltip "Tag value" "portland" >}} | {{< tooltip "Tag value" "mullen">}} | {{< tooltip "Field key" "ants" >}} | {{< tooltip "Field value" "32" >}} | + +## Timestamp + +All data stored in InfluxDB has a `_time` column that stores timestamps. 
On disk, timestamps are stored in epoch nanosecond format. InfluxDB formats timestamps to show the date and time in [RFC3339](/influxdb/v2.5/reference/glossary/#rfc3339-timestamp) UTC associated with the data. Timestamp precision is important when you write data.
+
+## Measurement
+
+The `_measurement` column shows the name of the measurement `census`. Measurement names are strings. A measurement acts as a container for tags, fields, and timestamps. Use a measurement name that describes your data. The name `census` tells us that the field values record the number of `bees` and `ants`.
+
+## Fields
+
+A field includes a field key stored in the `_field` column and a field value stored in the `_value` column.
+
+### Field key
+
+A field key is a string that represents the name of the field. In the sample data above, `bees` and `ants` are field keys.
+
+### Field value
+
+A field value represents the value of an associated field. Field values can be strings, floats, integers, or booleans. The field values in the sample data show the number of `bees` at specified times (`23` and `28`) and the number of `ants` at specified times (`30` and `32`).
+
+### Field set
+
+A field set is a collection of field key-value pairs associated with a timestamp. The sample data includes the following field sets:
+
+```bash
+
+census bees=23i,ants=30i 1566086400000000000
+census bees=28i,ants=32i 1566086760000000000
+       -----------------
+           Field set
+
+```
+
+{{% note %}}
+**Fields aren't indexed:** Fields are required in InfluxDB data and are not indexed. Queries that filter field values must scan all field values to match query conditions. As a result, queries on tags are more performant than queries on fields. **Store commonly queried metadata in tags.**
+{{% /note %}}
+
+## Tags
+
+The columns in the sample data, `location` and `scientist`, are tags.
+Tags include tag keys and tag values that are stored as strings and metadata.
+
+### Tag key
+
+The tag keys in the sample data are `location` and `scientist`.
+_For information about tag key requirements, see [Line protocol – Tag set](/influxdb/v2.5/reference/syntax/line-protocol/#tag-set)._
+
+### Tag value
+
+The tag key `location` has two tag values: `klamath` and `portland`.
+The tag key `scientist` also has two tag values: `anderson` and `mullen`.
+_For information about tag value requirements, see [Line protocol – Tag set](/influxdb/v2.5/reference/syntax/line-protocol/#tag-set)._
+
+### Tag set
+
+The collection of tag key-value pairs makes up a tag set. The sample data includes the following four tag sets:
+
+```bash
+location = klamath, scientist = anderson
+location = portland, scientist = anderson
+location = klamath, scientist = mullen
+location = portland, scientist = mullen
+```
+
+{{% note %}}
+**Tags are indexed:** Tags are optional. You don't need tags in your data structure, but it's typically a good idea to include tags.
+Because tags are indexed, queries on tags are faster than queries on fields. This makes tags ideal for storing commonly-queried metadata.
+{{% /note %}}
+
+{{% note %}}
+Tags containing highly variable information like UUIDs, hashes, and random strings will lead to a large number of unique series in the database, known as **high series cardinality**. High series cardinality is a primary driver of high memory usage for many database workloads. See [series cardinality](/influxdb/v2.5/reference/glossary/#series-cardinality) for more information.
+
+{{% /note %}}
+
+
+#### Why your schema matters
+
+If most of your queries focus on values in the fields, for example, a query to find when 23 bees were counted:
+
+```js
+from(bucket: "bucket-name")
+    |> range(start: 2019-08-17T00:00:00Z, stop: 2019-08-19T00:00:00Z)
+    |> filter(fn: (r) => r._field == "bees" and r._value == 23)
+```
+
+InfluxDB scans every field value in the dataset for `bees` before the query returns a response. If our sample `census` data grew to millions of rows, to optimize your query, you could rearrange your [schema](/influxdb/v2.5/reference/glossary/#schema) so the fields (`bees` and `ants`) become tags and the tags (`location` and `scientist`) become fields:
+
+| _time | _measurement | {{< tooltip "Tag key" "bees" >}} | _field | _value |
+|:------------------- |:------------ |:------- |:-- |:------ |
+| 2019-08-18T00:00:00Z | census | 23 | location | klamath |
+| 2019-08-18T00:00:00Z | census | 23 | scientist | anderson |
+| 2019-08-18T00:06:00Z | census | {{< tooltip "Tag value" "28" >}} | {{< tooltip "Field key" "location" >}} | {{< tooltip "Field value" "klamath" >}} |
+| 2019-08-18T00:06:00Z | census | 28 | scientist | anderson |
+
+| _time | _measurement | {{< tooltip "Tag key" "ants" >}} | _field | _value |
+|:------------------- |:------------ |:------- |:-- |:------ |
+| 2019-08-18T00:00:00Z | census | 30 | location | portland |
+| 2019-08-18T00:00:00Z | census | 30 | scientist | mullen |
+| 2019-08-18T00:06:00Z | census | {{< tooltip "Tag value" "32" >}} | {{< tooltip "Field key" "location" >}} | {{< tooltip "Field value" "portland" >}} |
+| 2019-08-18T00:06:00Z | census | 32 | scientist | mullen |
+
+Now that `bees` and `ants` are tags, InfluxDB doesn't have to scan all `_field` and `_value` columns. This makes your queries faster.
+
+## Bucket schema
+
+In InfluxDB Cloud, a bucket with the `explicit` schema-type requires an explicit
+schema for each measurement.
+Measurements contain tags, fields, and timestamps.
+An explicit schema constrains the shape of data that can be written to that measurement.
+
+The following schema constrains `census` data:
+
+name | type | data_type
+|:------- |:---------------|:--------------------
+time | timestamp |
+location | tag | string
+scientist | tag | string
+ants | field | integer
+bees | field | integer
+
+## Series
+
+Now that you're familiar with measurements, field sets, and tag sets, it's time to discuss series keys and series. A **series key** is a collection of points that share a measurement, tag set, and field key. For example, the [sample data](#sample-data) includes two unique series keys:
+
+| _measurement | tag set | _field |
+|:------------- |:------------------------------- |:------ |
+| census | {{< tooltip "Tag set" "location=klamath,scientist=anderson" >}} | {{< tooltip "Field key" "bees" >}} |
+| census | location=portland,scientist=mullen | ants |
+
+A **series** includes timestamps and field values for a given series key. From the sample data, here's a **series key** and the corresponding **series**:
+
+```bash
+# series key
+census,location=klamath,scientist=anderson bees
+
+# series
+2019-08-18T00:00:00Z 23
+2019-08-18T00:06:00Z 28
+```
+
+Understanding the concept of a series is essential when designing your [schema](/influxdb/v2.5/reference/glossary/#schema) and working with your data in InfluxDB.
+
+## Point
+
+A **point** includes the series key, a field value, and a timestamp.
For example, a single point from the [sample data](#sample-data) looks like this: + +`2019-08-18T00:00:00Z census ants 30 portland mullen` + +## Bucket + +All InfluxDB data is stored in a bucket. A **bucket** combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. For more information about buckets, see [Manage buckets](/influxdb/v2.5/organizations/buckets/). + +## Organization + +An InfluxDB **organization** is a workspace for a group of [users](/influxdb/v2.5/users/). All [dashboards](/influxdb/v2.5/visualize-data/dashboards/), [tasks](/influxdb/v2.5/process-data/), buckets, and users belong to an organization. For more information about organizations, see [Manage organizations](/influxdb/v2.5/organizations/). + +If you're just starting out, we recommend taking a look at the following guides: + +- [Get started](/influxdb/v2.5/get-started) +- [Write data](/influxdb/v2.5/write-data) +- [Query data](/influxdb/v2.5/query-data) + +For an overview of how these elements interconnect within InfluxDB's data model, watch the following video: + +{{< youtube 3qTTqsL27lI >}} diff --git a/content/influxdb/v2.5/reference/key-concepts/data-schema.md b/content/influxdb/v2.5/reference/key-concepts/data-schema.md new file mode 100644 index 000000000..2b108c7c4 --- /dev/null +++ b/content/influxdb/v2.5/reference/key-concepts/data-schema.md @@ -0,0 +1,63 @@ +--- +title: InfluxDB data schema +description: > + InfluxDB uses a tabular data schema for displaying raw data in Data Explorer and for returning query results in annotated CSV syntax. +aliases: + - /influxdb/v2.5/reference/key-concepts/table-structure +weight: 103 +menu: + influxdb_2_5_ref: + parent: Key concepts + name: Data schema +influxdb/v2.5/tags: [key concepts] +--- + +InfluxDB [data elements](/influxdb/v2.5/reference/key-concepts/data-elements/) are stored in **time-structured merge tree (TSM)** and **time series index (TSI)** files to efficiently compact stored data. + +InfluxDB also provides a **tabular data schema** that includes the following: + +- [Annotation rows](#annotation-rows) +- [Header row](#header-row) +- [Data rows](#data-rows) +- [Other columns](#other-columns) +- [Group keys](#group-keys) + +The **tabular data schema is used for the following**: + +- To [view raw data](/influxdb/v2.5/query-data/execute-queries/data-explorer/#view-raw-data) when [exploring metrics with InfluxDB](/influxdb/v2.5/visualize-data/explore-metrics) +- To return query results in [annotated CSV syntax](/influxdb/v2.5/reference/syntax/annotated-csv/) + +## Annotation rows + +Annotation rows describe column properties, for example: + +- `#group` +- `#datatype` +- `#default` + +## Header row + +The header row defines column labels that describe data in each column, for example: + +- `table` +- `_time` +- `_value` +- `_field` +- `_measurement` +- tag key names (without underscore prefix): `tag-1`, `tag-2` + +## Data rows + +Each data row contains the data specified in the header row for one [point](/influxdb/v2.5/reference/glossary/#point). + +## Other columns + +In addition to the columns in each data row (specified in the header row), the following columns are optional: + +- `annotation` +- `result` +- `table` + +## Group keys + +Determine the contents of output tables in Flux by grouping records that share common values in specified columns. Learn more about [grouping your data with Flux](/influxdb/v2.5/query-data/flux/group-data/). 
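+
+As a minimal sketch of what this tabular schema looks like in practice, the following command returns the annotated CSV response (including the `#group`, `#datatype`, and `#default` annotation rows) for a query grouped by a tag; the bucket name `example-bucket` is a placeholder, and the `influx` CLI is assumed to be already configured:
+
+```sh
+# Run a Flux query and print the raw annotated CSV response.
+# The #group annotation row marks which columns belong to the group key.
+influx query --raw 'from(bucket: "example-bucket")
+    |> range(start: -1h)
+    |> group(columns: ["location"])'
+```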
diff --git a/content/influxdb/v2.5/reference/key-concepts/design-principles.md b/content/influxdb/v2.5/reference/key-concepts/design-principles.md new file mode 100644 index 000000000..c75c078a5 --- /dev/null +++ b/content/influxdb/v2.5/reference/key-concepts/design-principles.md @@ -0,0 +1,44 @@ +--- +title: InfluxDB design principles +description: > + Principles and tradeoffs related to InfluxDB design. +weight: 104 +menu: + influxdb_2_5_ref: + parent: Key concepts + name: Design principles +influxdb/v2.5/tags: [key concepts, design principles] +--- + +InfluxDB implements optimal design principles for time series data. Some of these design principles may have associated tradeoffs in performance. + +- [Time-ordered data](#time-ordered-data) +- [Strict update and delete permissions](#strict-update-and-delete-permissions) +- [Handle read and write queries first](#handle-read-and-write-queries-first) +- [Schemaless design](#schemaless-design) +- [Datasets over individual points](#datasets-over-individual-points) +- [Duplicate data](#duplicate-data) + +## Time-ordered data + +To improve performance, data is written in time-ascending order. + +## Strict update and delete permissions + +To increase query and write performance, InfluxDB tightly restricts **update** and **delete** permissions. Time series data is predominantly new data that is never updated. Deletes generally only affect data that isn't being written to, and contentious updates never occur. + +## Handle read and write queries first + +InfluxDB prioritizes read and write requests over strong consistency. InfluxDB returns results when a query is executed. Any transactions that affect the queried data are processed subsequently to ensure that data is eventually consistent. Therefore, if the ingest rate is high (multiple writes per ms), query results may not include the most recent data. + +## Schemaless design + +InfluxDB uses a schemaless design to better manage discontinuous data. Time series data are often ephemeral, meaning the data appears for a few hours and then goes away. For example, a new host that gets started and reports for a while and then gets shut down. + +## Datasets over individual points + +Because the data set is more important than an individual point, InfluxDB implements powerful tools to aggregate data and handle large data sets. Points are differentiated by timestamp and series, so don’t have IDs in the traditional sense. + +## Duplicate data + +To simplify conflict resolution and increase write performance, InfluxDB assumes data sent multiple times is duplicate data. Identical points aren't stored twice. If a new field value is submitted for a point, InfluxDB updates the point with the most recent field value. In rare circumstances, data may be overwritten. Learn more about [duplicate points](/influxdb/v2.5/write-data/best-practices/duplicate-points/). diff --git a/content/influxdb/v2.5/reference/prometheus-metrics.md b/content/influxdb/v2.5/reference/prometheus-metrics.md new file mode 100644 index 000000000..666decac9 --- /dev/null +++ b/content/influxdb/v2.5/reference/prometheus-metrics.md @@ -0,0 +1,286 @@ +--- +title: Prometheus metric parsing formats +description: > + When scraping [Prometheus-formatted metrics](https://prometheus.io/docs/concepts/data_model/) + and writing them to InfluxDB, metrics are parsed and stored in InfluxDB in different formats. 
+menu: + influxdb_2_5_ref: + name: Prometheus metrics +weight: 8 +influxdb/v2.5/tags: [prometheus] +related: + - https://prometheus.io/docs/concepts/data_model/, Prometheus data model + - /influxdb/v2.5/write-data/developer-tools/scrape-prometheus-metrics/ + - /{{< latest "flux" >}}/prometheus/, Work with Prometheus in Flux + - /{{< latest "telegraf" >}}/plugins/#input-prometheus, Telegraf Prometheus input plugin + - /influxdb/v2.5/write-data/no-code/scrape-data/ + - /{{< latest "flux" >}}/stdlib/experimental/prometheus/scrape/ +--- + +[Prometheus-formatted metrics](https://prometheus.io/docs/concepts/data_model/) +are parsed and written to InfluxDB in one of two formats, depending on the scraping tool used: + +- [Metric version 1](#metric-version-1) +- [Metric version 2](#metric-version-2) + +#### Scraping tools and parsing format +{{% oss-only %}} + +| Scraping tool | InfluxDB Metric version | +| :----------------------------------------------------------------------------------------- | ----------------------------------------------------: | +| [Telegraf Prometheus plugin](/{{< latest "telegraf" >}}/plugins/#input-prometheus) | _Determined by `metric_version` configuration option_ | +| [InfluxDB scraper](/influxdb/v2.5/write-data/no-code/scrape-data/) | 1 | +| Flux [`prometheus.scrape()`]({{< latest "flux" >}}/stdlib/experimental/prometheus/scrape/) | 2 | + +{{% /oss-only %}} +{{% cloud-only %}} + +| Scraping tool | InfluxDB Metric version | +| :----------------------------------------------------------------------------------------- | ----------------------------------------------------: | +| [Telegraf Prometheus plugin](/{{< latest "telegraf" >}}/plugins/#input-prometheus) | _Determined by `metric_version` configuration option_ | +| Flux [`prometheus.scrape()`]({{< latest "flux" >}}/stdlib/experimental/prometheus/scrape/) | 2 | + +{{% /cloud-only %}} + +## Metric version 1 + +- **_time**: timestamp +- **_measurement**: [Prometheus metric name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) + _(`_bucket`, `_sum`, and `_count` are trimmed from histogram and summary metric names)_ +- **\_field**: _depends on the [Prometheus metric type](https://prometheus.io/docs/concepts/metric_types/)_ + - Counter: `counter` + - Gauge: `gauge` + - Histogram: _histogram bucket upper limits_, `count`, `sum` + - Summary: _summary quantiles_, `count`, `sum` +- **_value**: [Prometheus metric value](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) +- **tags**: A tag for each [Prometheus label](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) + _(except for histogram bucket upper limits (`le`) or summary quantiles (`quantile`))_. + The label name is the tag key and the label value is the tag value. + +### Example Prometheus query results +The following are example Prometheus metrics scraped from the **InfluxDB OSS 2.x `/metrics`** endpoint: + +```sh +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 1.42276424e+09 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. 
+# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 5.259247e+06 +# HELP task_executor_run_latency_seconds Records the latency between the time the run was due to run and the time the task started execution, by task type +# TYPE task_executor_run_latency_seconds histogram +task_executor_run_latency_seconds_bucket{task_type="system",le="0.25"} 4413 +task_executor_run_latency_seconds_bucket{task_type="system",le="0.5"} 11901 +task_executor_run_latency_seconds_bucket{task_type="system",le="1"} 12565 +task_executor_run_latency_seconds_bucket{task_type="system",le="2.5"} 12823 +task_executor_run_latency_seconds_bucket{task_type="system",le="5"} 12844 +task_executor_run_latency_seconds_bucket{task_type="system",le="10"} 12864 +task_executor_run_latency_seconds_bucket{task_type="system",le="+Inf"} 74429 +task_executor_run_latency_seconds_sum{task_type="system"} 4.256783538679698e+11 +task_executor_run_latency_seconds_count{task_type="system"} 74429 +# HELP task_executor_run_duration The duration in seconds between a run starting and finishing. +# TYPE task_executor_run_duration summary +task_executor_run_duration{taskID="00xx0Xx0xx00XX0x0",task_type="threshold",quantile="0.5"} 5.178160855 +task_executor_run_duration{taskID="00xx0Xx0xx00XX0x0",task_type="threshold",quantile="0.9"} 5.178160855 +task_executor_run_duration{taskID="00xx0Xx0xx00XX0x0",task_type="threshold",quantile="0.99"} 5.178160855 +task_executor_run_duration_sum{taskID="00xx0Xx0xx00XX0x0",task_type="threshold"} 2121.9758301650004 +task_executor_run_duration_count{taskID="00xx0Xx0xx00XX0x0",task_type="threshold"} 570 +``` + +#### Resulting line protocol +``` +go_memstats_alloc_bytes_total counter=1.42276424e+09 +go_memstats_buck_hash_sys_bytes gauge=5.259247e+06 +task_executor_run_latency_seconds,task_type=system 0.25=4413,0.5=11901,1=12565,2.5=12823,5=12844,10=12864,+Inf=74429,sum=4.256783538679698e+11,count=74429 +task_executor_run_duration,taskID=00xx0Xx0xx00XX0x0,task_type=threshold 0.5=5.178160855,0.9=5.178160855,0.99=5.178160855,sum=2121.9758301650004,count=570 +``` + +{{< expand-wrapper >}} +{{% expand "View version 1 tables when queried from InfluxDB" %}} +| _time | _measurement | _field | _value | +| :------------------------ | :---------------------------- | :------ | -----------: | +| {{< flux/current-time >}} | go_memstats_alloc_bytes_total | counter | 1422764240.0 | + +| _time | _measurement | _field | _value | +| :------------------------ | :------------------------------ | :----- | --------: | +| {{< flux/current-time >}} | go_memstats_buck_hash_sys_bytes | gauge | 5259247.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | -----: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | 0.25 | 4413.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | ------: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | 0.5 | 11901.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | ------: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | 1 | 12565.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | ------: | +| {{< flux/current-time >}} | 
task_executor_run_latency_seconds | system | 2.5 | 12823.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | ------: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | 5 | 12844.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | ------: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | +Inf | 74429.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | ----------------: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | sum | 425678353867.9698 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :-------------------------------- | :-------- | :----- | -----: | +| {{< flux/current-time >}} | task_executor_run_latency_seconds | system | count | 74429.0 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :------------------------- | :-------- | :----- | ----------: | +| {{< flux/current-time >}} | task_executor_run_duration | threshold | 0.5 | 5.178160855 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :------------------------- | :-------- | :----- | ----------: | +| {{< flux/current-time >}} | task_executor_run_duration | threshold | 0.9 | 5.178160855 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :------------------------- | :-------- | :----- | ----------: | +| {{< flux/current-time >}} | task_executor_run_duration | threshold | 0.99 | 5.178160855 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :------------------------- | :-------- | :----- | -----------------: | +| {{< flux/current-time >}} | task_executor_run_duration | threshold | sum | 2121.9758301650004 | + +| _time | _measurement | task_type | _field | _value | +| :------------------------ | :------------------------- | :-------- | :----- | -----: | +| {{< flux/current-time >}} | task_executor_run_duration | threshold | count | 570.0 | +{{% /expand %}} +{{< /expand-wrapper >}} + +## Metrics version 2 + +- **_time**: timestamp +- **_measurement**: `prometheus` +- **_field**: [Prometheus metric name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) + _(`_bucket` is trimmed from histogram metric names)_ +- **_value**: [Prometheus metric value](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) +- **url**: URL metrics were scraped from +- **tags**: A tag for each [Prometheus label](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + The label name is the tag key and the label value is the tag value. + +### Example Prometheus query results +The following are example Prometheus metrics scraped from the **InfluxDB OSS 2.x `/metrics`** endpoint: + +```sh +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 1.42276424e+09 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. 
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 5.259247e+06
+# HELP task_executor_run_latency_seconds Records the latency between the time the run was due to run and the time the task started execution, by task type
+# TYPE task_executor_run_latency_seconds histogram
+task_executor_run_latency_seconds_bucket{task_type="system",le="0.25"} 4413
+task_executor_run_latency_seconds_bucket{task_type="system",le="0.5"} 11901
+task_executor_run_latency_seconds_bucket{task_type="system",le="1"} 12565
+task_executor_run_latency_seconds_bucket{task_type="system",le="2.5"} 12823
+task_executor_run_latency_seconds_bucket{task_type="system",le="5"} 12844
+task_executor_run_latency_seconds_bucket{task_type="system",le="10"} 12864
+task_executor_run_latency_seconds_bucket{task_type="system",le="+Inf"} 74429
+task_executor_run_latency_seconds_sum{task_type="system"} 4.256783538679698e+11
+task_executor_run_latency_seconds_count{task_type="system"} 74429
+# HELP task_executor_run_duration The duration in seconds between a run starting and finishing.
+# TYPE task_executor_run_duration summary
+task_executor_run_duration{taskID="00xx0Xx0xx00XX0x0",task_type="threshold",quantile="0.5"} 5.178160855
+task_executor_run_duration{taskID="00xx0Xx0xx00XX0x0",task_type="threshold",quantile="0.9"} 5.178160855
+task_executor_run_duration{taskID="00xx0Xx0xx00XX0x0",task_type="threshold",quantile="0.99"} 5.178160855
+task_executor_run_duration_sum{taskID="00xx0Xx0xx00XX0x0",task_type="threshold"} 2121.9758301650004
+task_executor_run_duration_count{taskID="00xx0Xx0xx00XX0x0",task_type="threshold"} 570
+```
+
+#### Resulting line protocol
+{{< keep-url >}}
+```
+prometheus,url=http://localhost:8086/metrics go_memstats_alloc_bytes_total=1.42276424e+09
+prometheus,url=http://localhost:8086/metrics go_memstats_buck_hash_sys_bytes=5.259247e+06
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=0.25 task_executor_run_latency_seconds=4413
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=0.5 task_executor_run_latency_seconds=11901
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=1 task_executor_run_latency_seconds=12565
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=2.5 task_executor_run_latency_seconds=12823
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=5 task_executor_run_latency_seconds=12844
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=10 task_executor_run_latency_seconds=12864
+prometheus,url=http://localhost:8086/metrics,task_type=system,le=+Inf task_executor_run_latency_seconds=74429
+prometheus,url=http://localhost:8086/metrics,task_type=system task_executor_run_latency_seconds_sum=4.256783538679698e+11
+prometheus,url=http://localhost:8086/metrics,task_type=system task_executor_run_latency_seconds_count=74429
+prometheus,url=http://localhost:8086/metrics,taskID=00xx0Xx0xx00XX0x0,task_type=threshold,quantile=0.5 task_executor_run_duration=5.178160855
+prometheus,url=http://localhost:8086/metrics,taskID=00xx0Xx0xx00XX0x0,task_type=threshold,quantile=0.9 task_executor_run_duration=5.178160855
+prometheus,url=http://localhost:8086/metrics,taskID=00xx0Xx0xx00XX0x0,task_type=threshold,quantile=0.99 task_executor_run_duration=5.178160855
+prometheus,url=http://localhost:8086/metrics,taskID=00xx0Xx0xx00XX0x0,task_type=threshold task_executor_run_duration_sum=2121.9758301650004
+prometheus,url=http://localhost:8086/metrics,taskID=00xx0Xx0xx00XX0x0,task_type=threshold 
task_executor_run_duration_count=570 +``` + +{{< expand-wrapper >}} +{{% expand "View version 2 tables when queried from InfluxDB" %}} +| _time | _measurement | url | _field | _value | +| :------------------------ | :----------- | :---------------------------- | :---------------------------- | -----------: | +| {{< flux/current-time >}} | prometheus | http://localhost:8086/metrics | go_memstats_alloc_bytes_total | 1422764240.0 | + +| _time | _measurement | url | _field | _value | +| :------------------------ | :----------- | :---------------------------- | :------------------------------ | --------: | +| {{< flux/current-time >}} | prometheus | http://localhost:8086/metrics | go_memstats_buck_hash_sys_bytes | 5259247.0 | + +| _time | _measurement | task_type | url | le | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :--- | :-------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | 0.25 | task_executor_run_latency_seconds | 4413 | + +| _time | _measurement | task_type | url | le | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :-- | :-------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | 0.5 | task_executor_run_latency_seconds | 11901 | + +| _time | _measurement | task_type | url | le | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :-- | :-------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | 1 | task_executor_run_latency_seconds | 12565 | + +| _time | _measurement | task_type | url | le | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :-- | :-------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | 2.5 | task_executor_run_latency_seconds | 12823 | + +| _time | _measurement | task_type | url | le | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :-- | :-------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | 5 | task_executor_run_latency_seconds | 12844 | + +| _time | _measurement | task_type | url | le | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :--- | :-------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | +Inf | task_executor_run_latency_seconds | 74429 | + +| _time | _measurement | task_type | url | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :------------------------------------ | ----------------: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | task_executor_run_latency_seconds_sum | 425678353867.9698 | + +| _time | _measurement | task_type | url | _field | _value | +| :------------------------ | :----------- | :-------- | :---------------------------- | :-------------------------------------- | -----: | +| {{< flux/current-time >}} | prometheus | system | http://localhost:8086/metrics | task_executor_run_latency_seconds_count | 74429 | + +| _time | _measurement | task_type | taskID | url | 
quantile | _field | _value |
+| :------------------------ | :----------- | :-------- | :---------------- | :---------------------------- | :------- | :------------------------- | ----------: |
+| {{< flux/current-time >}} | prometheus | threshold | 00xx0Xx0xx00XX0x0 | http://localhost:8086/metrics | 0.5 | task_executor_run_duration | 5.178160855 |
+
+| _time | _measurement | task_type | taskID | url | quantile | _field | _value |
+| :------------------------ | :----------- | :-------- | :---------------- | :---------------------------- | :------- | :------------------------- | ----------: |
+| {{< flux/current-time >}} | prometheus | threshold | 00xx0Xx0xx00XX0x0 | http://localhost:8086/metrics | 0.9 | task_executor_run_duration | 5.178160855 |
+
+| _time | _measurement | task_type | taskID | url | quantile | _field | _value |
+| :------------------------ | :----------- | :-------- | :---------------- | :---------------------------- | :------- | :------------------------- | ----------: |
+| {{< flux/current-time >}} | prometheus | threshold | 00xx0Xx0xx00XX0x0 | http://localhost:8086/metrics | 0.99 | task_executor_run_duration | 5.178160855 |
+
+| _time | _measurement | task_type | taskID | url | _field | _value |
+| :------------------------ | :----------- | :-------- | :---------------- | :---------------------------- | :----------------------------- | -----------------: |
+| {{< flux/current-time >}} | prometheus | threshold | 00xx0Xx0xx00XX0x0 | http://localhost:8086/metrics | task_executor_run_duration_sum | 2121.9758301650004 |
+
+| _time | _measurement | task_type | taskID | url | _field | _value |
+| :------------------------ | :----------- | :-------- | :---------------- | :---------------------------- | :------------------------------- | -----: |
+| {{< flux/current-time >}} | prometheus | threshold | 00xx0Xx0xx00XX0x0 | http://localhost:8086/metrics | task_executor_run_duration_count | 570 |
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/influxdb/v2.5/reference/release-notes/_index.md b/content/influxdb/v2.5/reference/release-notes/_index.md
new file mode 100644
index 000000000..1f8b39a29
--- /dev/null
+++ b/content/influxdb/v2.5/reference/release-notes/_index.md
@@ -0,0 +1,12 @@
+---
+title: Release notes
+description: Find important information about what's included in new versions of InfluxData products.
+menu:
+  influxdb_2_5_ref:
+    name: Release notes
+weight: 1
+---
+
+Find important information about what's included in new versions of our products:
+
+{{< children >}}
diff --git a/content/influxdb/v2.5/reference/release-notes/influx-cli.md b/content/influxdb/v2.5/reference/release-notes/influx-cli.md
new file mode 100644
index 000000000..0a5c25721
--- /dev/null
+++ b/content/influxdb/v2.5/reference/release-notes/influx-cli.md
@@ -0,0 +1,185 @@
+---
+title: influx CLI release notes
+description: Important changes and what's new in each version of the influx command line interface (CLI).
+weight: 103
+menu:
+  influxdb_2_5_ref:
+    parent: Release notes
+    name: influx CLI
+---
+
+## v2.5.0 [2022-11-01]
+
+### Features
+
+- Add the `--username-password` flag to [`influx config set`](/influxdb/v2.4/reference/cli/influx/config/set/). Include `username:password` after this flag to ensure a session is automatically authenticated for the config. Include `username` (without password) to prompt for a password before creating the session. See the example below.
+
+### Maintenance
+
+- Upgrade to Go 1.19.
+- Fix Go version in `go.mod`. 
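+
+The following is a minimal sketch of the `--username-password` flag described under Features above. `example-config` and `example-user` are placeholders, and the `--config-name` flag is assumed to behave as it does for `influx config create`.
+
+```sh
+# Update an existing CLI connection configuration to authenticate with a
+# username and password (a session) instead of an API token.
+# Passing only a username prompts for the password; pass
+# example-user:examplePassword to set both non-interactively.
+influx config set \
+  --config-name example-config \
+  --username-password example-user
+```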
+ +### Bug fixes + +- Fix to allow [influx auth create](/influxdb/v2.4/reference/cli/influx/auth/create/) to successfully create an API token without error. +- Fix stack error typo. +- Fix an error where `stdin` could not be used to create tasks. +- Update `data_type` to `dataType` to ensure CSV files are successfully uploaded. +- Fix to let you create a remote connection for InfluxDB 1.x without requiring a remote org ID (`remoteOrgID`). Add warning that `remoteOrgID` is required for InfluxDB Cloud and InfluxDB OSS, but not required for InfluxDB 1.x (OSS or Enterprise). + +## v2.4.0 [2022-08-18] + +### Features + +- Set membership type to member or owner with + [`influx org members add`](/influxdb/v2.5/reference/cli/influx/org/members/add/). +- Add the [InfluxQL Shell (REPL)](/influxdb/v2.5/reference/cli/influx/v1/shell/). +- **(InfluxDB Cloud only)** Manage [InfluxDB Cloud Invokable Scripts](/influxdb/cloud/api-guide/api-invokable-scripts/) + with [`influx scripts`](/influxdb/v2.5/reference/cli/influx/scripts/). +- **(InfluxDB OSS only)** Add [username and password support](/influxdb/v2.5/reference/cli/influx/config/create/#create-a-connection-configuration-that-uses-a-username-and-password) + to `influx` CLI connection configurations as alternative to API tokens when using the CLI. + +### Maintenance +- Upgrade to Go 1.18.3 + +### Bug fixes +- No longer scope user and organizations permissions to an individual organization. +- Properly handle API tokens starting with a hyphen (`-`) passed to the CLI without `=`. +- Mutually exclude organization names and organization IDs to eliminate confusion + on which to use and whether or not environment variables are overriding command flags. + +--- + +## v2.3.0 [2022-04-08] + +### Features + +- Add [`influx remote`](/{{< latest "influxdb" >}}/reference/cli/influx/remote/) command. +- Add [`influx replication`](/{{< latest "influxdb" >}}/reference/cli/influx/replication/) command. +- Enhanced error messaging for InfluxDB and OSS specific commands. +- Add `api/v2/config` endpoint to display the runtime configuration (for example, when you run `influxd print-config`). This endpoint lets you review runtime configuration while the instance is live. + +### Bug fixes + +- `Auth create` command supports multiple buckets. +- Use `influx-debug-id` header for tracing. +- Duration parser shows duration missing units on error. +- Template apply uses improved diff checking. +- Fix error applying `-e jsonnet` template. + +--- + +## v2.2.1 [2021-11-09] + +This release includes two new bug fixes. + +### Bug fixes + +- Improve error messages for unknown subcommands (`Error: command “…” not recognized.`) by describing how to +run `./influx --help` to see a list of valid commands. Thanks @slai! + +- Ensure `org members remove` API calls successfully remove a member from an organization by fixing accidental swap of `orgID` and `userID`. Thanks @geek981108! + +--- + +## v2.2.0 [2021-10-21] + +This release includes three new features and bug fixes. + +### Features + +This release makes it easier to create API tokens with the `influx` CLI, adds support for viewing more than 20 buckets using `influx bucket list`, and adds a shorthand flag for bucket (`-b`) to `influx delete`. + +{{% oss-only %}} + +#### Create an Operator token in the influx CLI + +Add the ability to use the `influx` CLI to [create an Operator token](/influxdb/v2.0/security/tokens/#operator-token) with read and write permissions to all resources in all organizations available in InfluxDB. 
(Note, this is the same permissions generated for the initial token created by `influx setup` or `influxd upgrade`.) + +{{% /oss-only %}} + +#### Create an All-Access token in the influx CLI + +Add the ability to use the `influx` CLI to [create an All-Access API token](/influxdb/cloud/security/tokens/create-token/#create-a-token-using-the-influx-cli) with read and write permissions to all resources in an organization. + +#### View more buckets in the influx CLI + +Update [`influx bucket list`](/influxdb/cloud/reference/cli/influx/bucket/list/) with pagination to support displaying more than 20 buckets. By default, buckets are fetched in batches of 20; set `--page-size` to override this default value. You may also limit the total number of buckets to display with `--limit` (by default, there's no limit). + +#### New bucket shorthand for influx delete + +Add the shorthand flag `-b` for `--bucket` to [`influx delete`](/influxdb/cloud/reference/cli/influx/delete/). + +### Bug fixes + +- Detect and warn when the Operator token is changed using `influx restore` (either setting a new `--active` config or updating the `INFLUX_TOKEN` variable). +- Set newly-created connection configuration as active in `influx setup`. +- Embed timezone data into Windows builds to avoid errors. + +--- + +## v2.1.1 [2021-09-24] + +### Go version + +Upgrade to Go 1.17. + +### Bug fixes + +- Fix shell completion for top-level `influx` commands. +- Make global `--http-debug` flag visible in help text. +- Don't set empty strings for IDs in permission resources. +- Detect and error out on incorrect positional arguments. +- Respect value of `--host` flag when writing CLI configs in `setup`. + +--- + +## v2.1.0 [2021-07-29] + +### New repository + +This is the initial release of the `influx` CLI from the `influxdata/influx-cli` GitHub repository. + +### Breaking changes + +#### `influx write` skip-header parsing + +To simplify the CLI parser, the `write` command no longer supports `--skipHeader` +as short-hand for `--skipHeader 1`. + +#### Stricter input validation for `influx template` commands + +The `apply`, `export`, and `stacks` commands now raise errors when CLI options fail to parse instead of silently discarding bad inputs. +This change was made to help users debug when their commands fail to execute as expected. + +#### Server-side template summarization and validation + +The `template` and `template validate` commands now use an API request to the server to perform their logic, instead of performing the work on the client-side. +Offline summarization and validation is no longer supported. +This change was made to avoid significant code duplication between `influxdb` and `influx CLI`, and to allow server-side template logic to evolve without requiring coordinated CLI changes. + +#### `influx stacks --json` output conventions + +The output of `influx stacks --json` previously used an UpperCamelCase naming convention for most keys. +The command now uses lowerCamelCase consistently for all objects keys, matching the schema returned by the API. + +### Features + +- Add global `--http-debug` flag to all `influx` commands to help inspect communication with InfluxDB servers. +- Update [`bucket create`](/influxdb/cloud/reference/cli/influx/bucket/create/) to allow setting a schema type. +- Update [`bucket list`](/influxdb/cloud/reference/cli/influx/bucket/list/) to display schema types. 
+- Bind [`--skip-verify`](/influxdb/cloud/reference/cli/influx/org/members/add/#flags) flag to the `INFLUX_SKIP_VERIFY` environment variable. +- (InfluxDB Cloud only) Add [`buck +- (InfluxDB OSS only) Updates to `backup` and `restore`: + - Reimplement [`backup`](/influxdb/cloud/reference/cli/influx/backup/) to support downloading embedded SQL store from InfluxDB 2.0 or later. + - Add [`--compression`](/influxdb/v2.0/reference/cli/influx/backup/_index.md) flag to support GZIP compression of downloaded files. + - Reimplement `restore` to support uploading embedded SQL store from InfluxDB v2.1.x. +- (InfluxDB OSS only) Add [`--password`](/influxdb/cloud/reference/cli/influx/user/password/) flag to `user password` command to allow bypassing interactive prompt. + +### Bug fixes + +- Fix interactive password collection and color rendering in PowerShell. +- `org members list` no longer hangs on organizations with more than 10 members. +- Detect and warn when inputs to `write` contain standalone CR characters. +- `dashboards` command now accepts `--org` flag, or falls back to default org in config. +- Return a consistent error when responses fail to decode, including hints for OSS-only and Cloud-only commands. diff --git a/content/influxdb/v2.5/reference/release-notes/influxdb.md b/content/influxdb/v2.5/reference/release-notes/influxdb.md new file mode 100644 index 000000000..3413601ec --- /dev/null +++ b/content/influxdb/v2.5/reference/release-notes/influxdb.md @@ -0,0 +1,1902 @@ +--- +title: InfluxDB OSS release notes +description: Important changes and what's new in each version of InfluxDB. +menu: + influxdb_2_5_ref: + name: InfluxDB + parent: Release notes +weight: 101 +--- + +## v2.5 [2022-11-01] + +### Features + +- Updated user interface (UI). +- Allow for incremental changes to `fields.idx`. + +### Security updates + +- Restriction to view tokens only once, immediately after creation. +- Set `SameSite=strict` on session cookie. + +### Bug fixes + +- Manually scheduled tasks run when expected. +- Fix error where virtual DBRP mappings were being ignored. +- Ability to back up all buckets in your InfluxDB instance. +- Improved speed of deletes when measurement is part of the predicate. +- Improved error messaging when attempting to create user with an ID that already exists. + +### Maintenance + +- Upgrade to [Flux 0.188.1](/flux/v0.x/release-notes/#v01870-2022-10-17) +- Upgrade to [Go 1.18.7](https://go.dev/doc/go1.18) +- Upgrade to [Rust 1.63.0](https://www.rust-lang.org/) + +## v2.4 [2022-08-19] + +### Features + +- Support bucket names in the `replications` API and allow [InfluxDB 1.x DBRP](/influxdb/v2.5/query-data/influxql/dbrp) names as bucket names. +- Add the [InfluxQL shell (REPL)](/influxdb/v2.5/reference/cli/influx/v1/shell/). +- Change to operator model so that `admin` user has instance-level permissions without `operator` token. + +#### Flux advancement highlights +- Add the [`join` package](/flux/v0.x/stdlib/join/) to support inner joins, right outer joins, left outer joins, and full outer joins. +- Promote `experimental.to()` to [`influxdb.wideTo()`](/flux/v0.x/stdlib/influxdata/influxdb/wideto/). +- Add [`initialZero`](/flux/v0.x/stdlib/universe/derivative/#initialzero) parameter to [`derivative()`](/flux/v0.x/stdlib/universe/derivative/). +- Add `time()` function to the [`date package`](flux/v0.x/stdlib/date/) to convert timeable types (time and duration) to time types. 
+- Promote the following functions from `experimental.array` into the [`array` package](/flux/v0.x/stdlib/array/): + - [`array.concat()`](/flux/v0.x/stdlib/array/concat/) + - [`array.filter()`](/flux/v0.x/stdlib/array/filter/) + - [`array.map()`](/flux/v0.x/stdlib/array/map/) +- Promote the following functions from the experimental `http.requests` package into the [`http/requests` package](/flux/v0.x/stdlib/http/requests/): + - [`requests.do()`](/flux/v0.x/stdlib/http/requests/do/) + - [`requests.get()`](/flux/v0.x/stdlib/http/requests/get/) + - [`requests.peek()`](/flux/v0.x/stdlib/http/requests/peek/) + - [`requests.post()`](/flux/v0.x/stdlib/http/requests/post/) +- Promote `experimental.bitwise()` into the [`bitwise` package](/flux/v0.x/stdlib/bitwise/). +- Add new [experimental.catch()](/flux/v0.x/stdlib/experimental/catch/) and [testing.shouldError()](/flux/v0.x/stdlib/testing/shoulderror/) functions. +- Support conditional expressions, constants, and literals in vectorized [`map()`](flux/v0.x/stdlib/universe/map/). +- Optimize [`holtWinters()`](/flux/v0.x/stdlib/universe/holtwinters/) and redundant sorts. +- Deprecate the following experimental functions: + - `http.get()` + - `csv.from()` + +### Bug fixes + +- Log the log level at startup. +- Fix panic from `CreateIterator` in Flux queries. +- Fix error-caching in `bufio.Writer`. +- Remove `MATCHER_BEHAVIOR` environment variable. + +### Maintenance + +- Upgrade to [Go 1.18.4](https://go.dev/doc/go1.18). +- Upgrade to [Flux 0.179.0](/flux/v0.x/release-notes/#v01790-2022-08-15). + +## v2.3.0 [2022-6-17] + +This release includes the following [maintenance](#maintenance), [features](#features), [security updates](#security-updates) and [bug fixes](#bug-fixes). + +{{% note %}} +#### Production ready: replicate data remotely + +InfluxDB 2.2 introduced a technical preview of how to [replicate data from InfluxDB OSS to InfluxDB Cloud](/influxdb/v2.3/write-data/replication). This feature is now production ready. +{{% /note %}} + +### Maintenance + +- Upgrade from Flux 0.161.0 to [Flux 0.171.0](/flux/v0.x/release-notes/#v01710-2022-06-14). +- Upgrade to [Go 1.18.3](https://go.dev/doc/go1.18). + +### Features + +- Add the [`--instance-id`](/influxdb/v2.3/reference/config-options/?t=JSON#instance-id) flag to `influxd` runtime to add the `_instance_id` tag to remote replications metrics. Helps to identify where the metrics came from, and ensures that two instances don't overwrite each others' data. +- Add signifier to replication `user-agent` to signify when replication write requests are made. Helps to identify metrics written via replications. + +#### Flux advancement highlights + +##### Performance improvements + +- Vectorize arithmetic operators in `map()`. +- Vectorize logical operations in `map()`. +- Enable `movingAverage()` and `cumulativeSum()` optimizations by default. + +##### Other highlights + +- Add `preview()` to experimental package for limiting return rows and tables (as opposed to just rows with `limit()`). +- Add `date.scale()` to let users dynamically scale durations in dates. +- Add [OpenTracing](https://opentracing.io/docs/overview/spans/) spans to Flux transformations. Lets you monitor Flux scripts more precisely. +- Add `trace` option to Flux CLI. +- Rename `addDuration()` to [add](/flux/v0.x/stdlib/date/add/) and `subDuration()` to [sub](/flux/v0.x/stdlib/date/sub/), +and moved both of these functions from the experimental package to the date package. +- Add location support to `date.truncate()`. 
+- Add `_status` tag to PagerDuty record. +- Refactor operator profile to be in the query statistics. + +### Security updates + +Several security issues were fixed in dependencies and the toolchain used to build InfluxDB, including: +- An issue in the `gopkg.in/yaml.v3` package import that could lead to a DoS in the templates service. +- An issue in the `github.com/buger/jsonparser` package import that could potentially lead to a DoS in storage authorization. +- Cumulative security fixes for Go 1.17.8-[1.18.3](https://go.dev/doc/devel/release#go1.18.minor) are included in this release. + These fixes resolve the following InfluxDB issues: + - An issue with processing large PEM files that could lead to a DoS in the templates service or flux connections using `to()`. + - An issue in TLSv1.3 and a lack of ticket randomness. + - A minor issue with `filepath.Clean()` on Windows. + +### Bug fixes + +- Fix race condition when remote replicating deadlocks the remote writer that prevented writes to the target. +- Resolve error when creating v1 authorization (`v1 auth`) with a nonexistent bucket ID. +- Add fields to the `_tasks` bucket to match schema of the same bucket in InfluxDB Cloud. Provides consistency for clients accessing both. +- Fix rare case where measurement cardinality reported less than zero. +- Resolve panic on cleaning up failed iterators. + +## v2.2.0 [2022-04-06] + +This release includes the following new [features](#features) and several [bug fixes](#bug-fixes). + +### Features + +- [Technical preview: replicate data remotely](#technical-preview-replicate-data-remotely) +- [Flux updates](#flux-updates) +- [Build maintenance](#build-maintenance) +- [Task metadata](#task-metadata) +- [Troubleshoot with new metrics](#troubleshoot-with-new-metrics) +- [Display runtime configuration in use by `influxd`](#display-runtime-configuration-in-use-by-influxd) +- [Recover user credentials](#recover-user-credentials) +- [Security updates](#security-updates), including a new `hardening-enabled` option to enable additional security. + +#### Technical preview: replicate data remotely + +Add the option to [replicate data from InfluxDB OSS to InfluxDB Cloud](/influxdb/v2.3/write-data/replication). + +{{% warn %}} +On rare occasions, remote write failures may cause data in the replication queue to get stuck. To ensure data is not dropped, restart the InfluxDB instance replicating data. + +To assess whether this issue is occurring, we recommend periodically doing one of the following: +- Verify your data is successfully replicated to your target bucket. +- View your queue size using [`influx replication list`](/influxdb/cloud/reference/cli/influx/replication/list/) for unexpected growth. +{{% /warn %}} + +Replicating data remotely lets you do following: + - Create a durable subscription on a per bucket basis for the purposes of replicating data on-write from an InfluxDB OSS instance to InfluxDB Cloud. + - Store, analyze, and aggregate data locally while also forwarding newly arriving data to a centralized InfluxDB Cloud account. + - Configure a maximum buffer size and data age restrictions to restrict the amount of data stored on disk. + +#### Flux updates + +- Update to [Flux v0.150.0](/flux/v0.x/release-notes/#v01500-2022-01-19). +- Add option to log Flux queries that are cancelled because of server shutdown. + +#### Build maintenance + +- Upgrade `protobuf` library. 
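+
+To follow the queue-size advice in the replication technical preview above, you can periodically list your replication streams and watch the reported queue size. This is a minimal sketch; `example-org` is a placeholder organization name, and `--org` can be omitted if your active CLI configuration already sets an organization.
+
+```sh
+# List replication streams and review the reported queue size for
+# unexpected growth (a sign that remote writes are failing).
+influx replication list --org example-org
+```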
+ +#### Task metadata + +- Add option to pass `type=basic` to [`/api/v2/tasks`](/influxdb/v2.3/api/#tag/Tasks) to return task metadata without the query text. + +#### Troubleshoot with new metrics + +To assist with troubleshooting and operational management, expose the following metrics in Prometheus Exposition Format. + +##### TSM compaction metrics + +- `storage_compaction` +- `storage_compactions_active` +- `storage_compactions_duration_seconds_bucket` +- `storage_compactions_duration_seconds_count` +- `storage_compactions_duration_seconds_sum` +- `storage_compactions_queued` +- `storage_compactions_total` + +##### Failed to add to replication queue metrics + +- `PointsFailedToQueue` +- `BytesFailedToQueue` + +##### Write status metrics + +- `pointsWriteRequested` +- `pointsWriteOk` +- `pointsWriteDropped` +- `pointsWriteErr` +- `timeout` + +##### Disk size metrics per shard + +- `total`: number of files per shard +- `disk_bytes`: data size in bytes for each shard + +##### Cache subsystem metrics + +- `inuse_bytes`: Current memory consumption of cache +- `diskBytes`: Size of most recent snapshot" +- `latest_snapshot`: Unix time of most recent snapshot +- `writes_total`: Counter of all writes to cache +- `writes_err`: Counter of failed writes to cache +- `writes_dropped`: Counter of writes (with any dropped points) to cache + +##### WAL subsystem metrics + +- `size`: Size of WAL in bytes +- `writes`: Number of write attempts to the WAL +- `writes_err`: Number of failed write attempts to the WAL + +See [InfluxDB OSS metrics](/influxdb/v2.3/reference/internals/metrics/) for additional detail. + +#### Display runtime configuration in use by `influxd` + +- Add `api/v2/config` endpoint to display the runtime configuration (for example, when you run `influxd print-config`). This endpoint lets you review runtime configuration while the instance is live. + +#### Recover user credentials + +To recover user credentials, use [`influx recovery user list`](/influxdb/v2.3/reference/cli/influxd/recovery/user/list/) to retrieve a list of users, and then use [`influx recovery user update`](/influxdb/v2.3/reference/cli/influxd/recovery/user/update/) to update the password for a specified user. + +### Security updates + +- Add the [`hardening-enabled`](/influxdb/v2.3/security/enable-hardening) option to limit flux/pkger HTTP requests. The new `hardening-enabled` option ensures that InfluxDB first verifies the IP address of the URL is not private. + By default, Flux HTTP and template fetching requests are allowed to access localhost and private IP addresses. + - Disable use of jsonnet with `/api/v2/templates/apply`. +This prevents crafted authenticated requests from exfiltrating files accessible to the user InfluxDB runs as. +- Add read permissions check for querying data. + This prevents authenticated requests using a write-only token from reading data + via the InfluxQL `/query` compatibility API. +- Add write permissions check for `DELETE` and `DROP MEASUREMENT`. + This prevents authenticated requests using a read-only token from deleting data + via the InfluxQL `/query` compatibility API. +Additionally, several security issues were fixed in dependencies and +the toolchain used to build InfluxDB, including: +- The following cumulative security fixes for [Flux v0.161.0](/flux/v0.x/release-notes/) since 0.139.0 are included in this release: + - Quote db identifiers. + This addresses injection vulnerabilities in database connections using `to()`. + - Make substring check bounds correctly. 
+ This prevents authenticated queries from crashing the Flux engine. +- The cumulative security fixes for [Go 1.17.8](https://go.dev/doc/devel/release#go1.17.minor) since Go 1.17.2 are included in this release. + This addresses an issue in the InfluxDB test suite. + +### Bug fixes + +- Ensure manual task runs can happen concurrently. +- Extend snapshot copy of backup to filesystems that do not support hard links. +- Detect misquoted tag values and return an error. +- Fix potential deadlock in `influxd inspect dump-tsi`. +- Successfully handle errors returned by `Sketch.Merge`. +- Return `X-version` and `X-Build` headers for all requests to `/ping` endpoint. +- Add error when `meta.db` is missing. +- Sync TSI index file before closing. +- Fix race condition that could cause `restore` command to fail. +- To successfully restore a backup, use copy when a rename spans volumes. +- Disable use of jsonnet with `/api/v2/templates/apply`. +- Ensure that updating a check (`/checks/{checkID}`) does not require an owner ID. +- Remove NATS for scraper processing. Note, this fix does not alter scraper functionality--scrapers still work as they did before. + - `nats-port` and `nats-max-payload-bytes` flags have been deprecated. + - NATS is no longer embedded in InfluxDB. Because InfluxDB no longer requires a port for NATS, port conflict issues are reduced. +- Resolve the issue that prevented the browser from tracking the cookie `expiry` correctly, causing the cookie to expire automatically when restarting the browser or changing tabs. Now, the cookie is correctly preserved. +- Allow unlimited Flux HTTP calls. Previously, HTTP requests failed silently after 100MB of data transfer. +- Remove pagination limits on the `/telegrafs` API. Previously, pagination wasn't exposed to the API, so API requests were limited to the default 20 pages. + +## v2.1.1 [2021-11-08] + +{{% note %}} +To address underlying installation package issues, we bumped the 2.1 release version to 2.1.1. +{{% /note %}} + +This release includes several new [features](#features) and [bug fixes](#bug-fixes). + +### `influx` CLI moved to separate repository + +The `influx` CLI has been moved to its own GitHub [repository](https://github.com/influxdata/influx-cli/). + +{{% warn %}} +#### `influxdb` release artifacts affected +Release artifacts produced by `influxdb` are impacted as follows: + +- Release archives (`.tar.gz` and `.zip`) **no longer contain** the `influx` binary. +- The `influxdb2` package (`.deb` and `.rpm`) **no longer contains** the `influx` binary. Instead, the package declares a recommended dependency on the new `influxdb2-cli` package. +- The `quay.io/influxdb/influxdb` image **no longer contains** the `influx` binary. We recommend migrating to the `influxdb` image hosted on [DockerHub](https://hub.docker.com/_/influxdb). +{{% /warn %}} + +#### Find supported `influx` CLI commands + +With this change, versions of the `influx` CLI and InfluxDB server (`influxd`) are not guaranteed to exactly match. To check the version of the `influxd` server, see `influxd version` in `influx` CLI or use the `/health` endpoint of your InfluxDB server. + +{{% note %}} +The [`influx` CLI documentation](/influxdb/v2.3/reference/cli/influx/) has been updated to reflect which `influx` CLI commands work with which versions of InfluxDB. 
+{{% /note %}} + +### Features + +This release includes the following new features: + +- [Notebooks, annotations, and visualization updates](#notebooks-annotations-and-visualization-updates) +- [API](#api) and [CLI](#cli) updates +- Support for latest [Flux](#flux) and [Telegraf](#telegraf) releases +- Updates to the [InfluxQL](#influxql) +- [Token](#tokens) updates +- [Flux location support](#flux-location-support) + +#### Notebooks, annotations, and visualization updates + +- Add support for [notebooks](/influxdb/v2.3/notebooks/) and [annotations](/influxdb/v2.3/visualize-data/annotations/). +- Add support for static legends to line graphs and band plots. +- Enable new dashboard auto-refresh. +- Simplify display of data for table visualizations. + +#### API + +- Add new parameters to GET [`/users`](/influxdb/v2.3/api/#operation/GetUsers) API, including: `offset`, `limit`, and `after`. +- Add the [`api/v2/backup/metadata`](/influxdb/v2.3/api/#operation/GetBackupMetadata) endpoint for backing up both key-value and SQL metadata, and the [`api/v2/restore/sql`](/influxdb/v2.3/api/#operation/GetRoutes) for restoring SQL metadata. +- Deprecated [`POST .../secrets/delete`](/influxdb/v2.3/api/#operation/PostOrgsIDSecrets). To delete a secret, use [`DELETE .../secrets/{secretID}`](/influxdb/v2.3/api/#operation/DeleteOrgsIDSecretsID). + +#### CLI + +##### influxd configuration + +Added several new configuration options to [`influxd`](/influxdb/v2.3/reference/cli/influxd/): + +- Add `influxd recovery` command to let you create a recovery [Operator token](/influxdb/v2.3/security/tokens/#operator-token). +- Add `--sqlite-path` flag for specifying a user-defined path to the SQLite database file. +- Add `--storage-wal-max-concurrent-writes` flag to enable tuning memory pressure under heavy write load. +- Add `--storage-wal-max-write-delay` flag to prevent deadlocks when the WAL is overloaded with concurrent writes. +- Add `--storage-write-timeout` flag to set write request timeouts. +- Add `--storage-no-validate-field-size` flag to disable enforcement of max field size. +- Update `--store` flag to work with string values disk or memory. Memory continues to store metadata in-memory for testing; disk persists metadata to disk via bolt and SQLite. + +For more information, see [InfluxDB configuration options](/influxdb/v2.3/reference/config-options/). 
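+
+For example, the following sketch starts `influxd` with a few of the options listed above; the path and values are illustrative placeholders, not recommendations.
+
+```sh
+# Start influxd with a custom SQLite file location, a bounded WAL write
+# delay, and an explicit write-request timeout.
+influxd \
+  --sqlite-path /var/lib/influxdb2/influxd.sqlite \
+  --storage-wal-max-write-delay 10m \
+  --storage-write-timeout 10s
+```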
+ +##### influxd inspect + +Ported the following [`influxd inspect`](/influxdb/v2.3/reference/cli/influxd/inspect/) commands from InfluxDB 1.x: + +- [influxd inspect build-tsi](/influxdb/v2.3/reference/cli/influxd/inspect/build-tsi/) +- [influxd inspect delete-tsm](/influxdb/v2.3/reference/cli/influxd/inspect/delete-tsm/) +- [influxd inspect dump-tsi](/influxdb/v2.3/reference/cli/influxd/inspect/dump-tsi/) +- [influxd inspect dump-tsm](/influxdb/v2.3/reference/cli/influxd/inspect/dump-tsm/) +- [influxd inspect dump-wal](/influxdb/v2.3/reference/cli/influxd/inspect/dump-wal/) +- [influxd inspect report-tsi](/influxdb/v2.3/reference/cli/influxd/inspect/report-tsi/) +- [influxd inspect report-tsm](/influxdb/v2.3/reference/cli/influxd/inspect/report-tsm/) +- [influxd inspect verify-seriesfile](/influxdb/v2.3/reference/cli/influxd/inspect/verify-seriesfile/) +- [influxd inspect verify-tombstone](/influxdb/v2.3/reference/cli/influxd/inspect/verify-tombstone/) +- [influxd inspect verify-tsm](/influxdb/v2.3/reference/cli/influxd/inspect/verify-tsm/) +- [influxd inspect verify-wal](/influxdb/v2.3/reference/cli/influxd/inspect/verify-wal/) + +##### influxd downgrade + +Added the [influxd downgrade command](/influxdb/v2.3/reference/cli/influxd/downgrade/) +to migrate InfluxDB key-value metadata schemas to earlier 2.x versions when necessary. + +#### Flux + +- Update to [Flux v0.139.0](/flux/v0.x/release-notes/#v01390-2021-11-01). +- Enable writing to remote hosts using the Flux [`to()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/to/) and [`experimental.to()`](/{{< latest "flux" >}}/v0.x/stdlib/experimental/to/) functions. +- Flux now supports locations that dynamically modify time offsets based on your specified timezone. You can also specify fixed time offsets relative to UTC. +- Perform [bitwise operations](/{{< latest "flux" >}}/stdlib/experimental/bitwise/) + on integers and unsigned integers. +- [Query](/{{< latest "flux" >}}/query-data/sql/vertica/) and + [write to Vertica](/{{< latest "flux" >}}/query-data/sql/vertica/) SQL databases. +- Add the [`hex` package](/{{< latest "flux" >}}/stdlib/contrib/bonitoo-io/hex/) + for working with hexadecimal string values. + +#### InfluxQL + +- `SHOW MEASUREMENTS ON` now supports database and retention policy wildcards. For example, `SHOW MEASUREMENTS ON *.*` to show all databases and `SHOW MEASUREMENTS ON .*` to show all retention policies. +- Add hyper log operators `merge_hll`, `sum_hll`, and `count_hll` in InfluxQL to optimize series iteration. (`count_hll` optimizes queries that can be answered without inspecting TSM data.) + +#### Telegraf + +- Add the following new [Telegraf plugins](https://docs.influxdata.com/telegraf/v1.20/plugins/) to the Load Data page: + - Alibaba (Aliyun) CloudMonitor Service Statistics (`aliyuncms`) + - AMD ROCm System Management Interface (SMI) (`amd_rocm_smi`) + - Counter-Strike: Global Offensive (CS:GO) (`csgo`) + - Directory Monitoring (`directory_monitor`) + - Intel Data Plane Development Kit (DPDK) (`dpdk`) + - Elasticsearch Query (`elasticsearch_query`) + - Internet Speed Monitor (`internet_speed`) + - KNX (`knx_listener`) + - Mdstat (`mdstat`) + - Netstat (`netstat`) + - NFS Client (`nfsclient`) + - OpenTelemetry (`opentelemetry`) + - RavenDB (`ravendb`) + - SQL (`sql`) + - Trig (`trig`) + - Twemproxy (`twemproxy`) + +For more information about each plugin, see [Telegraf plugins](/telegraf/v1.20/plugins/), and search for the plugin name. + +#### Tokens + +- Add support for standard Bearer token syntax. 
Now you can specify token credentials as: `Authorization: Bearer xxxxxxxx`. +- If restoring a backup overwrites the Operator token, the new token value is returned. + +### Bug fixes + +- Log API errors to server logs and tell clients to check the server logs for the error message. +- Fix pagination for GET [`/buckets`](/influxdb/v2.3/api/#operation/GetBuckets) API when displaying results. Previously, pagination was broken if a request included both an `org` filter AND the `after` request parameter. Also corrects `descending` parameter to sort when an `org` filter is used and saved. +- Sync series segment to disk after writing. +- Do not allow shard creation to create overlapping shards. +- Don't drop shard group durations when upgrading InfluxDB. + +## v2.0.9 [2021-09-27] + +This release includes several new [features](#features) and [bug fixes](#bug-fixes). + +### Features + +New features include: + +- [API updates](#api-updates) +- [Flux updates](#flux-updates) +- [Performance enhancements](#performance-enhancements) + +#### API updates + +- Add a new route `/api/v2/resources` that returns a list of known resources to the platform, including the following resource types. Makes it easier to update all-access tokens with current resources: + + - `AuthorizationsResourceType` + - `BucketsResourceType` + - `ChecksResourceType` + - `DashboardsResourceType` + - `DBRPResourceType` + - `DocumentsResourceType` + - `LabelsResourceType` + - `NotificationEndpointResourceType` + - `NotificationRuleResourceType` + - `OrgsResourceType` + - `ScraperResourceType` + - `SecretsResourceType` + - `SourcesResourceType` + - `TasksResourceType` + - `TelegrafsResourceType` + - `UsersResourceType` + - `VariablesResourceType` + - `ViewsResourceType` + +#### Flux updates + +- Update to [Flux v0.130.0](/flux/v0.x/release-notes/#v01300-2021-09-15). +- Add support for [`influxdb.cardinality()`](/flux/v0.x/stdlib/influxdata/influxdb/cardinality/) function. +- Operational improvements: + - Add logging to Flux end-to-end tests (`TestFluxEndToEnd`) to help diagnose test failures. + - Add `--flux-log-enabled` option to [`influxd`](/influxdb/v2.3/reference/config-options/) to show detailed logs for Flux queries. + +#### Performance enhancements + +- Optimize series iteration for queries that can be answered without inspecting TSM data. +- Optimize queries with predicates that contain multiple measurements. + +### Bug fixes + +This release includes the following bug fixes and updates: + +- [API fix](#api-fix) +- [Dependency update](#dependency-update) +- [Error updates](#error-updates) +- [Limit update](#limit-update) +- [Miscellaneous operational fixes](#miscellaneous-operational-fixes) +- [Task updates](#task-updates) +- [Version maintenance](#version-maintenance) + +#### API fix + +- Correctly filter requests to `/api/v2/authorizations` by `org` and `user` parameters. + +#### Dependency update + +- Include `curl` as a dependency in `influxdb2` packages. + +#### Errors updates + +- Add message to set the Flux `content-type` when a query fails to parse as JSON. +- Discard excessive errors over `DefaultMaxSavedErrors (100)` to prevent out-of-memory crashes. +- Upgrade `golang.org/x/sys` to avoid panics on macs. + +#### Limit update + +- Implement hard limit on field size (`MaxFieldValueLength = 1048576`) while parsing line protocol. + +#### Miscellaneous operational fixes + +- Resolve the compaction queue stats flutter. 
+- Ensure the TSI index compacts log files that meet one of the following criteria: + - Log file hasn't been updated (no new series have been added to the shard) for 4 (or more) hours (to change this duration, specify a new [`storage-compact-full-write-cold-duration`](/influxdb/v2.3/reference/config-options/#storage-compact-full-write-cold-duration)) + - Log file is one (or more) megabytes (to update this size, specify a new [`storage-max-index-log-file-size`](/influxdb/v2.3/reference/config-options/#storage-max-index-log-file-size)) +- Repair bad port dropping return value names. +- Use consistent path separator in permission string representation. +- (Windows only) Copy snapshot files being backed up. + +#### Task updates + +- Updating an inactive task no longer schedules it. Thanks @raffs! +- Preserve comments in Flux queries when saving task definitions. + +#### Version maintenance + +- Fix `X-Influxdb-Build` and `X-Influxdb-Version` response header at `/ping`. +- Upgrade `influxql` to latest version and fix predicate handling for `SHOW TAG VALUES` meta queries. + +## v2.0.8 [2021-08-13] + +{{% warn %}} #### Upcoming changes to influx CLI packaging + +Beginning in InfluxDB 2.1, the `influx` CLI will no longer be packaged with the release. Future versions of `influx` CLI will be released from the [influx-cli](https://github.com/influxdata/influx-cli) repository. + +To adopt the new, separate `influx` CLI early, download the latest release from [GitHub](https://github.com/influxdata/influx-cli/releases/tag/v2.3.0) or from the [InfluxData Downloads portal](https://portal.influxdata.com/downloads/). +{{% /warn %}} + +### Go version + +- Upgrade to Go 1.16. **Requires macOS Sierra 10.12 or later to run.** + +### Features + +- Add `--ui-disabled` option to `influxd` to support running with the UI disabled. +- Telemetry improvements: Do not record telemetry data for non-existent paths; replace invalid static asset paths with a slug. +- Upgrade to Flux v0.124.0. +- Upgrade to UI v2.0.8. +- Upgrade `flux-lsp-browser` to v0.5.53. + +### Bug fixes + +- Rename ARM RPM packages with yum-compatible names. +- Upgrade to latest version of `influxdata/cron` so that tasks can be created with interval of `every: 1w`. +- Avoid rewriting `fields.idx` unnecessarily. +- Do not close connection twice in DigestWithOptions. +- Remove incorrect optimization for group-by. +- Return an error instead of panicking when InfluxQL statement rewrites fail. +- Migrate restored KV snapshots to latest schema before using them. +- Specify which fields are missing when rejecting an incomplete onboarding request. +- Ensure `systemd` unit blocks on startup until HTTP endpoint is ready. +- Fix display and parsing of `influxd upgrade` CLI prompts in PowerShell. +- Removed unused `chronograf-migator` package and chronograf API service, and updated various "chronograf" references. +- Fix display and parsing of interactive `influx` CLI prompts in PowerShell. +- Upgrade to `golang-jwt` 3.2.1. +- Prevent silently dropped writes when there are overlapping shards. +- Invalid requests to `/api/v2` subroutes now return 404 instead of a list of links. +- Flux meta queries for `_field` take fast path if `_measurement` is the only predicate. +- Copy names from mmapped memory before closing iterator. + +## v2.0.7 [2021-06-04] + +### Features + +- Optimize [`table.fill()`](/{{< latest "flux" >}}/stdlib/experimental/table/fill/) + execution within Flux aggregate windows. 
+- Upgrade Flux to [v0.117.0](/{{< latest "flux" >}}/release-notes/#v01171-2021-06-01). +- Upgrade UI to v2.0.7. +- Upgrade `flux-lsp-browser` to v0.5.47. + +### Bug Fixes + +- Fix query range calculation (off by one) over partially compacted data. +- Deprecate the unsupported `PostSetupUser` API. +- Add limits to the `/api/v2/delete` endpoint for start and stop times with error messages. +- Add logging to NATS streaming server to help debug startup failures. +- Accept `--input` instead of a positional argument in `influx restore`. +- Print error instead of panicking when `influx restore` fails to find backup manifests. +- Set last-modified time of empty shard directory to the directory's last-modified time, instead of the Unix epoch. +- Remove deadlock in `influx org members list` when an organization has greater than 10 members. +- Replace telemetry file name with slug for `ttf`, `woff`, and `eot` files. +- Enable use of absolute path for `--upgrade-log` when running `influxd upgrade` on Windows. +- Make InfluxQL meta queries respect query timeouts. + +--- + +## v2.0.6 General Availability [2021-04-29] + +### Bug Fixes +- Ensure query configuration written by `influxd upgrade` is valid. +- Set `query-concurrency` and `query-queue-size` configuration option defaults + to `0` to avoid validation failures when upgrading users. +- Correctly validate when `query-concurrency` is `0` and `query-queue-size` is + greater than `0`. + +## v2.0.5 General Availability [2021-04-27] + +{{% warn %}} +InfluxDB v2.0.5 introduced a defect that prevents users from successfully upgrading +from InfluxDB 1.x to 2.0 using the `influxd upgrade` command or Docker. +To [automatically upgrade from 1.x to 2.0](/influxdb/v2.3/upgrade/v1-to-v2/automatic-upgrade/) +with the `influxd upgrade` command or [with Docker](/influxdb/v2.3/upgrade/v1-to-v2/docker/), +use [InfluxDB v2.0.6](#v206-general-availability-2021-04-29). +{{% /warn %}} + +### Windows Support +This release includes our initial Windows preview build. + +### Breaking Changes + +#### /debug/vars removed +Prior to this release, the `influxd` server would expose profiling information over the `/debug/vars` endpoint. +This endpoint was unauthenticated and not used by InfluxDB systems to report diagnostics. +For security and clarity, the endpoint has been removed. +Use the `/metrics` endpoint to collect system statistics. + +#### `influx transpile` removed +The `transpile` command has been removed. Send InfluxQL requests directly to the server via the `/api/v2/query` +or `/query` HTTP endpoints. + +#### Default query concurrency changed +The default setting for the max number of concurrent Flux queries has been changed from 10 to unlimited (`0`). +To limit query concurrency and queue size: + +1. Set the `query-concurrency` config parameter to > 0 when running `influxd` to re-limit the maximum running query count, +2. Set the `query-queue-size` config parameter to > 0 to set the max number of queries that can be queued before the + server starts rejecting requests. + +#### Prefix for query-controller metrics changed +The prefix used for Prometheus metrics from the query controller has changed from `query_control_` to `qc_`. + +### Features +- Add [Swift client library](https://github.com/influxdata/influxdb-client-swift) + to the **Load Data** section of the InfluxDB UI. +- Add [`influx task retry-failed` command](/influxdb/v2.3/reference/cli/influx/task/retry-failed/) to rerun failed runs. 
+- Add [`--compression` option](/influxdb/v2.3/reference/cli/influx/write/#flags)
+  to the `influx write` command to support Gzip inputs.
+- Add new `influxd` configuration options:
+  - [pprof-disabled](/influxdb/v2.3/reference/config-options/#pprof-disabled)
+  - [metrics-disabled](/influxdb/v2.3/reference/config-options/#metrics-disabled)
+  - [http-read-header-timeout](/influxdb/v2.3/reference/config-options/#http-read-header-timeout)
+  - [http-read-timeout](/influxdb/v2.3/reference/config-options/#http-read-timeout)
+  - [http-write-timeout](/influxdb/v2.3/reference/config-options/#http-write-timeout)
+  - [http-idle-timeout](/influxdb/v2.3/reference/config-options/#http-idle-timeout)
+- Add `/debug/pprof/all` HTTP endpoint to gather all profiles at once.
+- Include the InfluxDB 1.x `http.pprof-enabled` configuration option in the 2.0 configuration file generated by the [InfluxDB upgrade process](/influxdb/v2.3/upgrade/v1-to-v2/automatic-upgrade/).
+- Add support for [custom shard group durations](/influxdb/v2.3/reference/cli/influx/bucket/create#create-a-bucket-with-a-custom-shard-group-duration) on buckets.
+- Optimize regular expression conditions in InfluxQL subqueries.
+- Update Telegraf plugins in the InfluxDB UI to include additions and changes from
+  [Telegraf 1.18](/telegraf/v1.18/about_the_project/release-notes-changelog/#v118-2021-3-17).
+- Display task IDs in the tasks list in the InfluxDB UI.
+- Write to standard output (`stdout`) when `--output-path -` is passed to [`influxd inspect export-lp`](/influxdb/v2.3/reference/cli/influxd/inspect/export-lp/).
+- Add `-p, --profilers` flag to [`influx query` command](/influxdb/v2.3/reference/cli/influx/query/)
+  to enable [Flux profilers](/{{< latest "flux" >}}/stdlib/profiler/) on
+  a query executed from the `influx` CLI.
+- Update InfluxDB OSS UI to match InfluxDB Cloud.
+- Support disabling concurrency limits in the Flux controller.
+- Replace unique resource IDs (UI assets, backup shards) with slugs to reduce
+  cardinality of telemetry data.
+- Standardize HTTP server error log output.
+- Enable InfluxDB user interface features:
+  - [Band visualization type](/influxdb/v2.3/visualize-data/visualization-types/band/)
+  - [Mosaic visualization type](/influxdb/v2.3/visualize-data/visualization-types/mosaic/)
+  - [Configure axis tick marks](/influxdb/v2.3/visualize-data/visualization-types/graph/#x-axis)
+  - Upload CSV files through the InfluxDB UI
+  - [Edit Telegraf configurations](/influxdb/v2.3/telegraf-configs/update/#edit-the-configuration-file-directly-in-the-ui) in the InfluxDB UI
+  - [Legend orientation options](/influxdb/v2.3/visualize-data/visualization-types/graph/#legend)
+  - [Refresh a single dashboard cell](/influxdb/v2.3/visualize-data/dashboards/control-dashboard/#refresh-a-single-dashboard-cell)
+- Upgrade to **Flux v0.113.0**.
+
+### Bug Fixes
+- Prevent "do not have an execution context" error when parsing Flux options in tasks.
+- Fix swagger to match implementation of DBRPs type.
+- Fix use-after-free bug in series ID iterator.
+- Fix TSM and WAL segment size check to check against the local `SegmentSize`.
+- Fix TSM and WAL segment size computing to correctly calculate `totalOldDiskSize`.
+- Update references to the documentation site to use current URLs.
+- Fix data race in the TSM engine when inspecting tombstone statistics.
+- Fix data race in the TSM cache.
+- Deprecate misleading `retentionPeriodHrs` key in the onboarding API.
+- Fix Single Stat graphs with thresholds crashing on negative values. 
+- Fix InfluxDB port in Flux function UI examples.
+- Remove unauthenticated, unsupported `/debug/vars` HTTP endpoint.
+- Respect 24 hour clock formats in the InfluxDB UI and add more format choices.
+- Prevent "do not have an execution context" error when parsing Flux options in tasks.
+- Prevent time field names from being formatted in the Table visualization.
+- Log error details when `influxd upgrade` fails to migrate databases.
+- Fix the cipher suite used when TLS strict ciphers are enabled in `influxd`.
+- Fix parse error in UI for tag filters containing regular expression meta characters.
+- Prevent concurrent access panic when gathering bolt metrics.
+- Fix race condition in Flux controller shutdown.
+- Reduce lock contention when adding new fields and measurements.
+- Escape dots in community templates hostname regular expression.
+
+## v2.0.4 General Availability [2021-02-04]
+
+### Docker
+
+#### ARM64
+This release extends the Docker builds hosted in `quay.io` to support the Linux/ARM64 platform.
+
+#### 2.x nightly images
+Prior to this release, competing nightly builds caused the nightly Docker tag to contain outdated binaries. This conflict is fixed, and the image tagged with nightly now contains 2.x binaries built from the `HEAD` of the `master` branch.
+
+### Breaking Changes
+
+#### `inmem` index option removed
+
+This release fully removes the `inmem` indexing option, along with the associated config options:
+- `max-series-per-database`
+- `max-values-per-tag`
+
+The startup process automatically generates replacement `tsi1` indexes for shards that need it.
+
+### Features
+
+#### `influxd` updates
+- Add new [`influxd upgrade`](/influxdb/v2.3/reference/cli/influxd/upgrade/) flag `--overwrite-existing-v2` to overwrite existing files at output paths (instead of aborting).
+- Add new configuration options:
+  - [`nats-port`](/influxdb/v2.3/reference/config-options/#nats-port)
+  - [`nats-max-payload-bytes`](/influxdb/v2.3/reference/config-options/#nats-max-payload-bytes)
+- Add new commands:
+  - Add [`influxd print-config`](/influxdb/v2.3/reference/cli/influxd/print-config/) to support automated configuration inspection.
+  - Add [`influxd inspect export-lp`](/influxdb/v2.3/reference/cli/influxd/inspect/export-lp/) to extract data in line-protocol format.
+
+#### New Telegraf plugins in UI
+- Update Telegraf plugins list in UI to include Beat, Intel PowerStats, and Riemann.
+
+#### Performance improvements
+- Promote schema and fill query optimizations to default behavior.
+
+#### Flux updates
+- Upgrade to [Flux v0.104.0](/{{< latest "flux" >}}/release-notes/#v0-104-0-2021-02-02).
+- Upgrade to `flux-lsp-browser` v0.5.31.
+
+### Bug Fixes
+
+- Standardize binary naming conventions.
+- Fix configuration loading issue.
+- Add Flux dictionary expressions to Swagger documentation.
+- Ensure `influxdb` service sees default environment variables when running under `init.d`.
+- Remove upgrade notice from new installs.
+- Ensure `config.toml` is initialized on new installs.
+- Include upgrade helper script (`influxdb2-upgrade.sh`) in GoReleaser manifest.
+- Prevent `influx stack update` from overwriting stack name and description.
+- Fix timeout setup for `influxd` graceful shutdown.
+- Require user to set password during initial user onboarding.
+- Error message improvements:
+  - Remove duplication from task error messages.
+  - Improve error message shown when influx CLI can't find an `org` by name. 
+  - Improve error message when opening BoltDB with unsupported file system options.
+  - Improve messages in DBRP API validation errors.
+- `influxd upgrade` improvements:
+  - Add confirmation step with file sizes before copying data files.
+  - Prevent panic in `influxd upgrade` when v1 users exist but v1 config is missing.
+- Fix logging initialization for storage engine.
+- Don't return 500 codes for partial write failures.
+- Don't leak `.tmp` files while backing up shards.
+- Allow backups to complete while a snapshot is in progress.
+- Fix silent failure to register CLI arguments as required.
+- Fix loading when `INFLUXD_CONFIG_PATH` points to a `.yml` file.
+- Prevent extra output row from GROUP BY crossing DST boundary.
+- Update Flux functions list in UI to reflect that `v1` package was renamed to `schema`.
+- Set correct `Content-Type` on v1 query responses.
+- Respect the `--skip-verify` flag when running `influx query`.
+- Remove blank lines from payloads sent by `influx write`.
+- Fix infinite loop in Flux parser caused by invalid array expressions.
+- Support creating users without initial passwords in `influx user create`.
+- Fix incorrect errors when passing `--bucket-id` to `influx write`.
+
+## v2.0.3 General Availability [2020-12-14]
+
+### Breaking Changes
+
+#### `influxd upgrade`
+
+Previously, `influxd upgrade` would attempt to write upgraded `config.toml` files into the same directory as the source
+`influxdb.conf` file. If this failed, a warning would be logged and `config.toml` would be written to the home directory of the user who launched the upgrade.
+
+This release changes this behavior in two ways:
+
+- By default, `config.toml` is written to the same directory as the Bolt DB and engine files (`~/.influxdbv2/`)
+- If writing the upgraded config fails, the `upgrade` process exits with an error instead of falling back to the `HOME` directory
+
+To override the default configuration path (`~/.influxdbv2/`), use the new `--v2-config-path` option to specify the output path to the v2 configuration file (`config.toml`). For details, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2.3/upgrade/v1-to-v2/).
+
+#### InfluxDB v2 packaging
+
+We've renamed the InfluxDB v2 DEB and RPM packages to clarify versions. The package name is now `influxdb2` and conflicts with any previous `influxdb` package (including initial 2.0.0, 2.0.1, and 2.0.2 packages).
+
+This release also defines v2-specific path defaults and provides [helper scripts](https://github.com/influxdata/influxdb/blob/master/scripts/influxdb2-upgrade.sh) for `influxd upgrade` and cleanup cases.
+
+### Features
+
+- Allow password to be specified as a CLI option in [`influx v1 auth create`](/influxdb/cloud/reference/cli/influx/auth/create/).
+- Allow password to be specified as a CLI option in [`influx v1 auth set-password`](/influxdb/cloud/reference/cli/influx/auth/).
+- Implement [delete with predicate](/influxdb/v2.3/write-data/delete-data/).
+- Improve ID-related error messages for `influx v1 dbrp` commands.
+- Update Flux to [v0.99.0](/{{< latest "flux" >}}/release-notes/#v0-99-0-2020-12-15).
+- Update `flux-lsp-browser` to v0.5.25.
+- Support for ARM64 preview build.
+
+### Bug Fixes
+
+- Don't log v1 write request bodies.
+- Fix panic when writing a point with 100 or more tags.
+- Fix validation of existing DB names when creating DBRP mappings.
+- Enforce max value of 2147483647 on query concurrency to avoid startup panic.
+- Automatically migrate existing DBRP mappings from old schema to avoid panic.
+- Optimize shard lookup in groups containing only one shard.
+- Always respect the `--name` option in `influx setup`.
+- Allow for 0 (infinite) values for `--retention` in `influx setup`.
+- Fix panic when using a `null` value as a record or array in a Flux query.
+
+## v2.0.2 General Availability [2020-11-19]
+
+### Breaking changes
+
+#### DBRP HTTP API now matches Swagger documentation
+
+Previously, the database retention policy (DBRP) mapping API did not match the Swagger spec. If you're using scripts based on the previous implementation instead of the Swagger spec, you'll need to either update them or use the new [DBRP CLI commands](/influxdb/v2.3/reference/cli/influx/v1/dbrp/) instead.
+
+### Features
+- Improvements to upgrade from 1.x to 2.x:
+  - Warning appears if auth is not enabled in 1.x (`auth-enabled = false`), which is not an option in 2.x. For details, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2.3/upgrade/v1-to-v2/).
+  - `upgrade` command now checks to see if continuous queries are running and automatically exports them to a local file.
+- Upgrade to [Flux v0.95.0](/{{< latest "flux" >}}/release-notes/#v0-95-0-2020-11-17).
+- Upgrade `flux-lsp-browser` to v0.5.23.
+- Manage database retention policy (DBRP) mappings via CLI. See [`influx v1 dbrp`](/influxdb/v2.3/reference/cli/influx/v1/dbrp/).
+- Filter task runs by time.
+
+### Bug Fixes
+- Fixes to `influxd upgrade` command:
+  - Remove internal subcommands from help text.
+  - Validate used input paths upfront.
+- Add locking during TSI iterator creation.
+- Fix various typos.
+- Use `--skip-verify` flag for backup/restore CLI command. This is passed to the underlying HTTP client for the `BackupService` and `RestoreService` to support backup and restore on servers with self-signed certificates.
+- Don't automatically print help on `influxd` errors.
+- Add `SameSite=Strict` flag to session cookie.
+- Ensure `SHOW DATABASES` returns a list of the unique databases only.
+- Allow scraper to ignore insecure certificates on an endpoint.
+- Ensure Flux reads across all shards.
+- Use the associated default retention policies defined within the DBRP mapping if no retention policy is specified as part of a v1 write API call.
+- Allow self-signed certificates for scraper targets.
+- Bump version in `package.json` so it appears correctly.
+
+## v2.0.1 General Availability [2020-11-10]
+
+InfluxDB 2.0 general availability (GA) introduces the first **production-ready** open source version of InfluxDB 2.0. This release comprises all features and bug fixes included in prior alpha, beta, and release candidate versions.
+
+{{% note %}}
+#### Known issues
+
+##### Delete with predicate API not implemented
+
+The delete with predicate API (`/api/v2/delete`) has not been implemented and currently returns a `501 Not implemented` message. This API will be implemented post GA.
+
+##### Duplicate DBRP mappings per database
+
+When there are multiple [DBRP mappings](/influxdb/v2.3/reference/api/influxdb-1x/dbrp/) with the same database name in InfluxDB 1.x, `SHOW DATABASES` incorrectly returns duplicates.
+{{% /note %}}
+
+Highlights include:
+
+- Support for **upgrading to InfluxDB 2.0**:
+  - To upgrade **from InfluxDB 1.x**, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2.3/upgrade/v1-to-v2).
+ - To upgrade **from InfluxDB 2.0 beta 16 or earlier**, see [Upgrade from InfluxDB 2.0 beta to InfluxDB 2.0](/influxdb/v2.3/upgrade/v2-beta-to-v2). +- **Flux**, our powerful new functional data scripting language designed for querying, analyzing, and acting on data. This release includes [Flux v0.94.0](/{{< latest "flux" >}}/release-notes/#v0-94-0-2020-11-09). If you're new to Flux, [check out how to get started with Flux](/influxdb/v2.3/query-data/get-started/). Next, delve deeper into the [Flux standard library](/{{< latest "flux" >}}/stdlib//) reference docs and see how to [query with Flux](/influxdb/v2.3/query-data/flux/). +- Support for [InfluxDB 1.x API compatibility](/influxdb/v2.3/reference/api/influxdb-1x/). +- **Templates** and **stacks**. Discover how to [use community templates](/influxdb/v2.3/influxdb-templates/use/) and how to [manage templates with stacks](/influxdb/v2.3/influxdb-templates/stacks/). + +If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/influxdb/v2.3/get-started/) and [InfluxDB key concepts](/influxdb/v2.3/reference/key-concepts/). + +## v2.0.0 [2020-11-09] + +### Features +- Improve UI for v1 `influx auth` commands. +- Upgrade to [Flux v0.94.0](/{{< latest "flux" >}}/release-notes/#v0-94-0-2020-11-10) +- Upgrade `flux-lsp-browser` to v0.5.22. +- Add [RAS Telegraf input plugin](/telegraf/v1.16/plugins//#ras). + +### Bug Fixes + +- Remove unused `security-script` option from `influx upgrade` command. +- Fix parsing of retention policy CLI arguments in `influx setup` and `influxd upgrade`. +- Create CLI configs during upgrade to v2. +- Allow write-only v1 tokens to find database retention policies (DBRPs). +- Update `v1 auth` description. +- Use `db`/`rp` naming convention when migrating databases to buckets. +- Improve help text for `influxd` and `--no-password` switch. +- Use `10` instead of `MaxInt` when rewriting query-concurrency. +- Remove bucket and mapping auto-creation from `/write` 1.x compatibility API. +- Fix misuse of `reflect.SliceHeader`. + +## v2.0.0-rc.4 [2020-11-05] + +### Features + +- Upgrade to [Flux v0.93.0](/{{< latest "flux" >}}/release-notes/#v0-93-0-2020-11-02). +- Add `influx backup` and `influx restore` CLI commands to support backing up and restoring data in InfluxDB 2.0. +- Add the `v1/authorization` package to support authorizing requests to the InfluxDB 1.x API. + +### Bug Fixes + +- Add a new `CreateUniquePhysicalNode` method, which reads and applies the plan node ID in context. Each physical node has a unique ID to support planner rules applied more than once in a query. Previously, the same node ID (hence the same dataset ID) caused the execution engine to generate undefined results. +- A cloned task is now only activated when you select **Active**. Previously, a cloned task was activated if the original task was activated. +- Reduce the `influx` binary file size. +- Isolate the `TelegrafConfigService` and remove URM interactions. +- Use the updated HTTP client for the authorization service. +- Make `tagKeys` and `tagValues` work for edge cases involving fields. +- Correctly parse float as 64-bits. +- Add simple metrics related to installed templates. +- Remove extra multiplication of retention policies in onboarding. +- Use the `fluxinit` package to initialize the Flux library instead of builtin. +- Add Logger to the constructor function to ensure the log field is initialized. +- Return an empty iterator instead of null in `tagValues`. 
+- Fix the `/ready` response content type to return `application/json`.
+
+## v2.0.0-rc.3 [2020-10-29]
+
+### Features
+
+- Upgrade to [Flux v0.91.0](/{{< latest "flux" >}}/release-notes/#v0910-2020-10-26).
+- Enable window aggregate mean pushdown.
+- Add `newMultiShardArrayCursors` to aggregate array cursors.
+- UI updates:
+  - Upgrade `papaparse` to 5.2.0.
+  - Upgrade `flux-lsp-browser` to v0.5.21.
+  - Add properties for storing your tick generation selections, including a `generateAxisTicks` property to turn this feature on and off.
+  - Update generate ticks into an array of properties for each axis.
+  - Add the `legendColorizeRows` property to toggle the color on and off in the legend.
+
+### Bug Fixes
+
+- Resolve `invalid operation: fs.Bavail` error that occurred in some cases when using `DiskUsage()`. Now, `fs.Bavail` is always converted to `uint64` to ensure the types in an expression align.
+- Refactor notifications to isolate the `notification/endpoint/service` package and move the rule service into its own package.
+- Update to clear log out.
+- Refactor to allow `newIndexSeriesCursor()` to accept an `influxql.Expr`.
+- Remove unreferenced packages.
+
+## v2.0.0-rc.2 [2020-10-22]
+
+### Features
+
+- Upgrade to [Flux v0.90.0](/{{< latest "flux" >}}/release-notes/#v0900-2020-10-19).
+- Add `--force` option to the `influx stacks rm` command, which lets you remove a stack without the confirmation prompt.
+- Add `aggregate_resultset` for mean aggregate pushdown to optimize windowed results.
+- Return an error if adding a resource to a stack (`influx stacks update --addResource`) fails due to an invalid resource type or resource ID.
+
+### Bug Fixes
+
+- Update `pkger` test templates to use valid Flux to avoid `found unexpected argument end` error. Previously, any template with a `v.dashboardVariable` returned an `undefined identifier v` error.
+- Update the InfluxDB configuration file `/etc/influxdb/influxdb.conf` to recognize the user's home directory. Previously, if a user (other than root user) ran the `upgrade` command, a permissions error occurred.
+- Remove the Telegraf RAS Daemon plugin and other miscellaneous Telegraf plugin updates.
+- Update the `derivative` in the InfluxDB UI (`ui/src/timeMachine`) to specify the `unit` is one second (`1s`).
+- Enable the new `AuthorizationService` from the authorization package in the `launcher` package (`cmd/influxd/launcher`).
+- Update `config upgrade` to save the correct InfluxDB configuration filename.
+
+## v2.0.0-rc.1 [2020-10-14]
+
+### Features
+- Add [`influxd upgrade`](/influxdb/v2.3/reference/cli/influxd/upgrade/) command for upgrading from 1.x to 2.0.
+- Upgrade to Flux v0.89.0.
+
+### Bug Fixes
+- Enable scrapers. (Scrapers did not work in rc0.)
+- Update default number of tasks listed with `influx task list` to 100.
+- Add support for [duration unit identifiers](/{{< latest "flux" >}}/spec/lexical-elements/#duration-literals) to templates.
+- Preserve cell colors in imported and exported templates.
+- Resolve issue to ensure the `influx` CLI successfully returns a single Telegraf configuration.
+- Ensure passwords are at least 8 characters in `influx setup`.
+
+## v2.0.0-rc.0 [2020-09-29]
+
+{{% warn %}}
+#### Manual upgrade required
+
+To simplify the migration for existing users of InfluxDB 1.x, this release includes significant breaking changes that require a manual upgrade from all alpha and beta versions.
For more information, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2.3/reference/upgrading/rc-upgrade-guide/).
+{{% /warn %}}
+
+### Breaking changes
+
+#### Manual upgrade
+
+- To continue using data from InfluxDB 2.0 beta 16 or earlier, you must move all existing data out of the `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. All existing dashboards, tasks, integrations, alerts, users, and tokens must be recreated. For information on how to migrate your data, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2.3/reference/upgrading/rc-upgrade-guide/).
+
+#### Port update to 8086
+
+- Change the default port of InfluxDB from 9999 back to 8086. If you would still like to run on port 9999, you can start `influxd` with the `--http-bind-address` option. You must also [update any InfluxDB CLI configuration profiles](/influxdb/v2.3/reference/cli/influx/config/set/) with the new port number (see the example at the end of this version's notes).
+
+#### Support for 1.x storage engine and InfluxDB 1.x compatibility API
+
+- Port the TSM1 storage engine. This change supports a multi-shard storage engine and InfluxQL writes and queries using the InfluxDB 1.x API compatibility [`/write`](/influxdb/v2.3/reference/api/influxdb-1x/write/) and [`/query`](/influxdb/v2.3/reference/api/influxdb-1x/query/) endpoints.
+
+#### Disable delete with predicate API
+
+- Disable the delete with predicate API (`/api/v2/delete`). This API now returns a `501 Not implemented` message.
+
+### Features
+
+#### Load Data redesign
+
+- Update the Load Data page to increase discovery and ease of use. Now, you can [load data from sources in the InfluxDB user interface](/influxdb/v2.3/write-data/no-code/load-data/).
+
+#### Community templates added to InfluxDB UI
+
+- Add [InfluxDB community templates](/influxdb/v2.3/influxdb-templates/) directly in the InfluxDB user interface (UI).
+
+#### New data sources
+
+- Add InfluxDB v2 Listener, NSD, OPC-UA, and Windows Event Log to the Sources page.
+
+#### CLI updates
+
+- Add option to print raw query results in [`influx query`](/influxdb/v2.3/reference/cli/influx/query/).
+- Add ability to export resources by name using [`influx export`](/influxdb/v2.3/reference/cli/influx/export/).
+- Add new processing options and enhancements to [`influx write`](/influxdb/v2.3/reference/cli/influx/write/).
+- Add `--active-config` flag to [`influx` commands](/influxdb/v2.3/reference/cli/influx/#commands) to set the configuration for a single command.
+- Add `influxd` [configuration options](/influxdb/v2.3/reference/config-options/#configuration-options) for storage options and InfluxQL coordinator tuning.
+- Add `max-line-length` switch to the [`influx write`](/influxdb/v2.3/reference/cli/influx/write/) command to address `token too long` errors for large inputs.
+
+#### API updates
+
+- List buckets in the API now supports the `after` (ID) parameter as an alternative to `offset`.
+
+#### Task updates
+
+- Record last success and failure run times in the task.
+- Inject the task option `latestSuccessTime` in Flux `Extern`.
+
+### Bug Fixes
+
+- Add description to [`influx auth`](/influxdb/v2.3/reference/cli/influx/auth/) command outputs.
+- Resolve issues with check triggers in notification tasks by including the edge of the observed boundary.
+- Detect and provide warning about duplicate tag names when writing CSV data using `influx write`.
+- Ensure the group annotation does not override the existing line part (measurement, field, tag, time) in a CSV group annotation.
+- Added `PATCH` to the list of allowed methods.
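+
+As referenced in the port update above, the following is a minimal, hypothetical sketch of keeping the old port and updating a CLI connection configuration to match. It assumes a local instance; the configuration name `default` and the host URL are placeholders, and the `--config-name` flag reflects the current `influx config set` syntax, so confirm it with `influx config set -h` for your version.
+
+```sh
+# Keep serving on the old 9999 port instead of the new 8086 default.
+influxd --http-bind-address=:9999
+
+# Point an existing CLI connection configuration at the port influxd now uses.
+# "default" and the host URL below are placeholder values.
+influx config set \
+  --config-name default \
+  --host-url http://localhost:9999
+```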
+ +## v2.0.0-beta.16 [2020-08-06] + +{{% warn %}} +This release includes breaking changes: +- Remove `influx repl` command. To use the Flux REPL, build the REPL from source. +- Drop deprecated `/packages` route tree. +- Support more types for template `envRef` default value and require explicit default values. +- Remove orgs/labels nested routes from the API. +{{% /warn %}} + +### Features + +- Add resource links to a stack's resources from public HTTP API list/read calls. +- Enhance resource creation experience when limits are reached. +- Add `dashboards` command to `influx` CLI. +- Allow user onboarding to optionally set passwords. +- Limit query response sizes for queries built in QueryBuilder by requiring an aggregate window. + +### Bug Fixes + +- Require all `influx` CLI flag arguments to be valid. +- Dashboard cells correctly map results when multiple queries exist. +- Dashboard cells and overlay use UTC as query time when toggling to UTC timezone. +- Bucket names may not include quotation marks. + +### UI Improvements + +- Alerts page filter inputs now have tab indices for keyboard navigation. + +## v2.0.0-beta.15 [2020-07-23] + +### Features + +- Add event source to stacks. +- Add ability to uninstall stacks. +- Drop deprecated `influx pkg` commands. +- Add Telegraf management commands to `influx` CLI. +- Enable dynamic destination for the `influx` CLI configuration file. + +### Bug Fixes + +- Allow 0 to be the custom set minimum value for y domain. +- Single Stat cells render properly in Safari. +- Limit variable querying when submitting queries to used variables. + +## v2.0.0-beta.14 [2020-07-08] + +### Features + +- Extend `influx stacks update` command with ability to add resources without apply template. +- Consolidate all InfluxDB template and stack functionality into two new public APIs: `/api/v2/templates` and `/api/v2/stacks`. +- Extend template `Summary` and `Diff` nested types with `kind` identifiers. +- Add static builds for Linux. +- Update Flux to v.0.71.1. + +### Bug Fixes + +- Don't overwrite build date set via `ldflags`. +- Fix issue where define query was unusable after importing a Check. +- Update documentation links + +## v2.0.0-beta.13 [2020-06-25] + +### Features + +- Cancel submitted queries in the Data Explorer. +- Extend templates with the source `file|url|reader`. +- Collect stats on installed InfluxData community template usage. +- Allow raw `github.com` host URLs for `yaml|json|jsonnet` URLs in InfluxDB templates. +- Allow for remote files for all `influx template` commands. +- Extend stacks API with update capability. +- Add support for config files to `influxd` and any `cli.NewCommand` use case. +- Extend `influx stacks` command with new `influx stacks update` command. +- Skip resources in a template by kind or by `metadata.name`. +- Extend `influx apply` with resource filter capabilities. +- Provide active configuration when running `influx config` without arguments. +- Enable `influxd` binary to look for a configuration file on startup. +- Add environmental default values to the template parser. +- Templates will store which dashboard variable should be selected by default. + +### Bug Fixes + +- Fix `uint` overflow during setup on 32bit systems. +- Drop support for `--local` flag within `influx` CLI. +- Fix issue where undefined queries in cells result in error in dashboard. +- Add support for day and week time identifiers in the CLI for bucket and setup commands. +- Cache dashboard cell query results to use as a reference for cell configurations. 
+- Validate `host-url` for `influx config create` and `influx config set` commands.
+- Fix `influx` CLI flags to accurately depict flags for all commands.
+
+## v2.0.0-beta.12 [2020-06-12]
+
+### Features
+
+- Add option for Cloud users to use the `influx` CLI to interact with a Cloud instance. For more information, see how to [download and install the influx CLI](/influxdb/v2.3/get-started/) and then learn more about how the [influx - InfluxDB command line interface](/influxdb/v2.3/reference/cli/influx/) works.
+- Consolidate `influx apply` commands under templates. Remove some nesting of the `influx` CLI commands.
+- Make all `influx apply` applications stateful through stacks.
+- Add ability to export a stack's existing resource state using `influx export`.
+- Update `influx apply` commands with improved usage and examples in long form.
+- Update `influx` CLI to include the `-version` command and return the User-Agent header.
+- Add `RedirectTo` functionality to ensure Cloud users are redirected to the page that they were trying to access after logging into Cloud.
+- Maintain sort order on a dashboard after navigating away.
+- Allow tasks to open in new tabs.
+
+### Bug Fixes
+
+- Support organization name and ID in DBRP operations.
+- Prevent the CLI from failing when an unexpected flag is entered.
+- `influx delete` now respects the configuration settings.
+- Store initialization for `pkger` enforced on reads.
+- Backfill missing `fillColumns` field for histograms in `pkger`.
+- Notify the user how to escape presentation mode when the feature is toggled.
+
+### UI Improvements
+
+- Display bucket ID in bucket list and enable 1-click copying.
+- Update Tokens list to be consistent with other resource lists.
+- Reduce the number of variables being hydrated when toggling variables.
+- Redesign dashboard cell loading indicator to be more obvious.
+
+## v2.0.0-beta.11 [2020-05-27]
+
+{{% warn %}}
+The beta 11 version was **not released**. Changes below are included in the beta 12 release.
+{{% /warn %}}
+
+### Features
+
+- Ability to set UTC time for a custom time range query.
+- Ability to set a minimum or maximum value for the y-axis visualization setting (rather than requiring both).
+- New `csv2lp` library for converting CSV (comma separated values) to InfluxDB line protocol.
+- Add the InfluxDB version to the InfluxDB v2 API `/health` endpoint.
+
+### Bug Fixes
+
+- Automatically adjust the drop-down list width to ensure the longest item in a list is visible.
+- Fix bug in Graph + Single Stat visualizations to ensure `timeFormat` persists.
+- Authorizer now exposes the full permission set. This adds the ability to derive which organizations the Authorizer has access to read or write to without using a User Request Management (URM) service.
+- Fix issue causing variable selections to hydrate all variable values, decreasing the impact on network requests.
+- Resolve scrollbar issues to ensure datasets are visible and scrollable.
+- Check status now displays a warning if loading a large amount of data.
+
+## v2.0.0-beta.10 [2020-05-07]
+
+### Features
+
+- Add ability to delete a stack and all associated resources.
+- Enforce DNS name compliance on the `metadata.name` field in all `pkger` resources.
+- Add stateful `pkg` management with stacks.
+
+### Bug Fixes
+
+- Ensure `UpdateUser` cleans up the index when updating names.
+- Ensure Checks can be set for zero values.
+
+### UI Improvements
+
+- Create buckets in the Data Explorer and Cell Editor.
+
+---
+
+## v2.0.0-beta.9 [2020-04-23]
+
+### Bug Fixes
+- Add index for URM by user ID to improve lookup performance.
+- Existing session expiration time is respected on session renewal.
+- Make CLI respect environment variables and flags and extend support for config orgs to all commands.
+
+### UI Improvements
+- Update layout of alerts page to work on all screen sizes.
+- Sort dashboards on Getting Started page by recently modified.
+- Add single-color schemes for visualizations: Solid Red, Solid Blue, Solid Yellow, Solid Green, and Solid Purple.
+
+---
+
+## v2.0.0-beta.8 [2020-04-10]
+
+### Features
+- Add `influx config` CLI command to switch back to previous activated configuration.
+- Introduce new navigation menu.
+- Add `-file` option to `influx query` and `influx task` CLI commands.
+- Add support for command line options to limit memory for queries.
+
+### Bug Fixes
+- Fix card size and layout issues in dashboards index view.
+- Fix check graph font and lines defaulting to black, causing the graph to be unreadable.
+- Fix text-wrapping display issue and popover sizing bug when adding labels to a resource.
+- Respect the now-time of the compiled query if provided.
+- Fix spacing between ticks.
+- Fix typos in Flux functions list.
+
+### UI Improvements
+- Update layout of Alerts page to work on all screen sizes.
+- Sort dashboards on Getting Started page by recently modified.
+
+---
+
+## v2.0.0-beta.7 [2020-03-27]
+
+### Features
+
+- Add option to display dashboards in [light mode](/influxdb/v2.3/visualize-data/dashboards/control-dashboard/#toggle-dark-mode-and-light-mode).
+- Add [shell `completion` commands](/influxdb/v2.3/reference/cli/influx/completion/) to the `influx` CLI to generate completion scripts for a specified shell (`bash` or `zsh`).
+- Make all `pkg` resources unique by `metadata.name` field.
+- Ensure Telegraf configuration tokens aren't retrievable after creation. New tokens can be created after Telegraf has been set up.
+- [Delete bucket by name](/influxdb/v2.3/organizations/buckets/delete-bucket/#delete-a-bucket-by-name) using the `influx` CLI.
+- Add helper module to write line protocol to a specified URL, org, and bucket.
+- Add [`pkg stack`](/influxdb/v2.3/reference/cli/influx/stacks) for stateful package management.
+- Add `--no-tasks` flag to `influxd` to disable scheduling of tasks.
+- Add ability to output CLI results as JSON and hide table headers.
+- Add an [easy way to switch configurations](/influxdb/v2.3/reference/cli/influx/config/#quickly-switch-between-configurations) using the `influx` CLI.
+
+### Bug fixes
+
+- Fix NodeJS logo display in Firefox.
+- Fix Telegraf configuration bugs where system buckets were appearing in the Buckets list.
+- Fix threshold check bug where checks could not be created when a field had a space in the name.
+- Reuse slices built by iterator to reduce allocations.
+- Updated duplicate check error message to be more explicit and actionable.
+
+### UI improvements
+
+- Redesign OSS Login page.
+- Display graphic when a dashboard has no cells.
+
+---
+
+## v2.0.0-beta.6 [2020-03-12]
+
+### Features
+- Clicking on bucket name takes user to Data Explorer with bucket selected.
+- Extend pkger (InfluxDB Templates) dashboards with table view support.
+- Allow for retention to be provided to `influx setup` command as a duration.
+- Extend `influx pkg export all` capabilities to support filtering by label name and resource type.
+- Added new login and sign-up screen for InfluxDB Cloud users that allows direct login from their region.
+- Added new `influx config` CLI for managing multiple configurations. + +### Bug Fixes +- Fixed issue where tasks were exported for notification rules. +- Fixed issue where tasks were not exported when exporting by organization ID. +- Fixed issue where tasks with imports in the query would break in pkger. +- Fixed issue where selecting an aggregate function in the script editor did not + add the function to a new line. +- Fixed issue where creating a dashboard variable of type "map" piped the incorrect + value when map variables were used in queries. +- Added missing usernames to `influx auth` CLI commands. +- Disabled group functionality for check query builder. +- Fixed cell configuration error that popped up when users created a dashboard + and accessed the "Disk Usage" cell for the first time. +- Listing all the default variables in the Variable tab of the script editor. +- Fixed bug that prevented the interval status on the dashboard header from + refreshing on selections. +- Updated table custom decimal feature for tables to update on focus. +- Fixed UI bug that set Telegraf config buttons off-center and resized config + selections when filtering through the data. +- Fixed UI bug that caused dashboard cells to error when using `v.bucket` for the first time. +- Fixed appearance of client library logos in Safari. +- Fixed UI bug that prevented checks created with the query builder from updating. +- Fixed a bug that prevented dashboard cell queries from working properly when + creating group queries using the query builder. + +### UI Improvements +- Swap `billingURL` with `checkoutURL`. +- Move Cloud navigation to top of page instead of within left side navigation. +- Adjust aggregate window periods to use duration input with validation. + +--- + +## v2.0.0-beta.5 [2020-02-27] + +### Features +- Update Flux to v0.61.0. +- Add secure flag to session cookie. +- Add optional secret value flag to `influx secret` command. + +### Bug Fixes +- Sort dashboards on homepage alphabetically. +- Tokens page now sorts by status. +- Set the default value of tags in a check. +- Fix sort by variable type. +- Calculate correct stacked line cumulative when lines are different lengths. +- Resource cards are scrollable. +- Query Builder groups on column values, not tag values. +- Scatterplots render tooltips correctly. +- Remove pkger gauge chart requirement for color threshold type. +- Remove secret confirmation from `influx secret update`. + +--- + +## v2.0.0-beta.4 [2020-02-14] + +### Features +- Added labels to buckets. +- Connect Monaco Editor to Flux LSP server. +- Update Flux to v0.59.6. + +### Bug Fixes +- Revert bad indexing of `UserResourceMappings` and `Authorizations`. +- Prevent gauge visualization from becoming too small. + +--- + +## v2.0.0-beta.3 [2020-02-11] + +### Features +- Extend `influx cli pkg command` with ability to take multiple files and directories. +- Extend `influx cli pkg command` with ability to take multiple URLs, files, + directories, and stdin at the same time. +- `influx` CLI can manage secrets. + +### Bug Fixes +- Fix notification rule renaming panics in UI. +- Fix the tooltip for stacked line graphs. +- Fixed false success notification for read-only users creating dashboards. +- Fix issue with pkger/http stack crashing on duplicate content type. + +--- + +## v2.0.0-beta.2 [2020-01-24] + +### Features +- Change Influx packages to be CRD compliant. +- Allow trailing newline in credentials file and CLI integration. 
+- Add support for prefixed cursor search to ForwardCursor types.
+- Add backup and restore.
+- Introduce resource logger to tasks, buckets and organizations.
+
+### Bug Fixes
+- Check engine closed before collecting index metrics.
+- Reject writes which use any of the reserved tag keys.
+
+### UI Improvements
+- Swap `billingURL` with `checkoutURL`.
+- Move Cloud navigation to top of page instead of within left side navigation.
+- Adjust aggregate window periods to use duration input with validation.
+
+---
+
+## v2.0.0-beta.1 [2020-01-08]
+
+### Features
+- Add support for notification endpoints to `influx` templates and packages.
+- Drop `id` prefix for secret key requirement for notification endpoints.
+- Add support for check resource to `pkger` parser.
+- Add support for check resource `pkger` dry run functionality.
+- Add support for check resource `pkger` apply functionality.
+- Add support for check resource `pkger` export functionality.
+- Add new `kv.ForwardCursor` interface.
+- Add support for notification rule to `pkger` parser.
+- Add support for notification rule `pkger` dry run functionality.
+- Add support for notification rule `pkger` apply functionality.
+- Add support for notification rule `pkger` export functionality.
+- Add support for tasks to `pkger` parser.
+- Add support for tasks to `pkger` dry run functionality.
+- Add support for tasks to `pkger` apply functionality.
+- Add support for tasks to `pkger` export functionality.
+- Add `group()` to Query Builder.
+- Add last run status to check and notification rules.
+- Add last run status to tasks.
+- Extend `pkger` apply functionality with ability to provide secrets outside of package.
+- Add hide headers flag to `influx` CLI task find command.
+- Manual overrides for readiness endpoint.
+- Drop legacy inmem service implementation in favor of KV service with inmem dependency.
+- Drop legacy bolt service implementation in favor of KV service with bolt dependency.
+- While creating check, also display notification rules that would match check based on tag rules.
+- Increase default bucket retention period to 30 days (an example of setting an explicit retention period per bucket follows the bug fixes below).
+- Add toggle to table thresholds to allow users to choose between setting threshold colors to text or background.
+- Add developer documentation.
+- Capture User-Agent header as query source for logging purposes.
+
+### Bug Fixes
+- Ensure environment variables are applied consistently across commands and fix an issue where the `INFLUX_` environment variable prefix was not set globally.
+- Remove default frontend sorting when Flux queries specify sorting.
+- Store canceled task runs in the correct bucket.
+- Update `sortby` functionality for table frontend sorts to sort numbers correctly.
+- Prevent potential infinite loop when finding tasks by organization.
+- Retain user input when parsing invalid JSON during import.
+- Fix test issues due to multiple flush/sign-ins being called in the same test suite.
+- Update `influx` CLI to show only "see help" message, instead of the whole usage.
+- Fix notification tag-matching rules and enable tests to verify.
+- Extend y-axis when stacked graph is selected.
+- Fix query reset bug that was resetting query in script editor whenever dates were changed.
+- Fix table threshold bug defaulting set colors to the background.
+- Time labels no longer squished to the left.
+- Fix underlying issue with disappearing queries made in Advanced Mode.
+- Prevent negative zero and allow zero to have decimal places.
+- Limit data loader bucket selection to non-system buckets.
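+
+As referenced in the default-retention change above, the following is a minimal, hypothetical sketch of creating a bucket with an explicit retention period from the CLI. The bucket and organization names are placeholders, and the flags shown assume the current `influx bucket create` syntax (commands differed in early beta releases), so confirm them with `influx bucket create -h`.
+
+```sh
+# Create a bucket that keeps data for 90 days instead of relying on the default.
+# "example-bucket" and "example-org" are placeholder names.
+influx bucket create \
+  --name example-bucket \
+  --org example-org \
+  --retention 90d
+```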
+ +### UI Improvements +- Add honeybadger reporting to create checks. + +--- + +## v2.0.0-alpha.21 [2019-12-13] + +### Features +- Add stacked line layer option to graphs. +- Annotate log messages with trace ID, if available. +- Bucket create to accept an organization name flag. +- Add trace ID response header to query endpoint. + +### Bug Fixes +- Allow table columns to be draggable in table settings. +- Light up the home page icon when active. +- Make numeric inputs first class citizens. +- Prompt users to make a dashboard when dashboards are empty. +- Remove name editing from query definition during threshold check creation. +- Wait until user stops dragging and releases marker before zooming in after threshold changes. +- Adds properties to each cell on `GET /dashboards/{dashboardID}`. +- Gracefully handle invalid user-supplied JSON. +- Fix crash when loading queries built using the query builder. +- Create cell view properties on dashboard creation. +- Update scrollbar style. +- Fixed table UI threshold colorization issue. +- Fixed windowPeriod issue that stemmed from Webpack rules. +- Added delete functionality to note cells so that they can be deleted +- Fix failure to create labels when creating Telegraf configs +- Fix crash when editing a Telegraf config. +- Updated start/end time functionality so that custom script time ranges overwrite dropdown selections. + +--- + +## v2.0.0-alpha.20 [2019-11-20] + +### Features +- Add TLS insecure skip verify to influx CLI. +- Extend influx cli user create to allow for organization ID and user passwords to be set on user. +- Auto-populate organization IDs in the code samples. +- Expose bundle analysis tools for front end resources. +- Allow users to view just the output section of a Telegraf config. +- Allow users to see string data in single stat graph type. + +### Bug Fixes +- Fix long startup when running `influx help`. +- Mock missing Flux dependencies when creating tasks. +- Ensure array cursor iterator stats accumulate all cursor stats. +- Hide Members section in Cloud environments. +- Change how cloud mode is enabled. +- Merge front end development environments. +- Refactor table state logic on the front end. +- Arrows in tables show data in ascending and descending order. +- Sort by retention rules now sorts by second. +- Horizontal scrollbar no longer covering data; +- Allow table columns to be draggable in table settings. +- Light up the home page icon when active. +- Make numeric inputs first-class citizens. +- Prompt users to make a dashboard when dashboards are empty. +- Remove name editing from query definition during threshold check creation. +- Wait until user stops dragging and releases marker before zooming in after threshold changes. + +### UI Improvements +- Redesign cards and animations on Getting Started page. +- Allow users to filter with labels in Telegraf input search. + +--- + +## v2.0.0-alpha.19 [2019-10-30] + +### Features +- Add shortcut for toggling comments and submitting in Script Editor. + +### UI Improvements +- Redesign page headers to be more space-efficient. +- Add 403 handler that redirects back to the sign-in page on oats-generated routes. + +### Bug Fixes +- Ensure users are created with an active status. +- Added missing string values for `CacheStatus` type. +- Disable saving for threshold check if no threshold selected. +- Query variable selector shows variable keys, not values. +- Create Label overlay disables the submit button and returns a UI error if name field is empty. 
+- Log error as info message on unauthorized API call attempts.
+- Ensure `members` and `owners` endpoints lead to 404 when organization resource does not exist.
+- Telegraf UI filter functionality shows results based on input name.
+- Fix Telegraf UI sort functionality.
+- Fix task UI sort functionality.
+- Exiting a configuration of a dashboard cell properly renders the cell content.
+- Newly created checks appear on the checklist.
+- Changed task runs success status code from 200 to 201 to match Swagger documentation.
+- Text areas have the correct height.
+
+---
+
+## v2.0.0-alpha.18 [2019-09-26]
+
+### Features
+- Add `jsonweb` package for future JWT support.
+- Added the JMeter Template dashboard.
+
+### UI Improvements
+- Display dashboards index as a grid.
+- Add viewport scaling to the HTML meta tag for responsive mobile scaling.
+- Remove rename and delete functionality from system buckets.
+- Prevent new buckets from being named with the reserved `_` prefix.
+- Prevent user from selecting system buckets when creating Scrapers, Telegraf configurations, read/write tokens, and when saving as a task.
+- Limit values from draggable threshold handles to 2 decimal places.
+- Redesign check builder UI to fill the screen and make more room for composing message templates.
+- Move Tokens tab from Settings to Load Data page.
+- Expose all Settings tabs in navigation menu.
+- Added Stream and table functions to query builder.
+
+### Bug Fixes
+- Remove scrollbars blocking onboarding UI step.
+
+---
+
+## v2.0.0-alpha.17 [2019-08-14]
+
+### Features
+- Optional gzip compression of the query CSV response.
+- Add task types.
+- When getting task runs from the API, runs will be returned in order of most recently scheduled first.
+
+### Bug Fixes
+- Fix authentication when updating a task with invalid org or bucket.
+- Update the documentation link for Telegraf.
+- Fix to surface errors properly as task notifications on create.
+- Fix limiting of get runs for a task.
+
+---
+
+## v2.0.0-alpha.16 [2019-07-25]
+
+### Bug Fixes
+- Add link to documentation text in line protocol upload overlay.
+- Fix issue in the Authorization API that prevented creating an auth for another user.
+- Fix `influx` CLI ignoring the user flag for auth creation.
+- Fix the map example in the documentation.
+- Ignore null/empty Flux rows, which prevents a single stat/gauge crash.
+- Fixes an issue where clicking on a dashboard name caused an incorrect redirect.
+- Upgrade templates lib to 0.5.0.
+- Upgrade giraffe lib to 0.16.1.
+- Fix incorrect notification type for manually running a task.
+- Fix an issue where canceled tasks did not resume.
+
+---
+
+## v2.0.0-alpha.15 [2019-07-11]
+
+### Features
+- Add time zone support to the UI.
+- Added new storage inspection tool to verify TSM files.
+
+### Bug Fixes
+- Fix incorrect reporting of tasks as successful when errors occur during result iteration.
+
+#### Known Issues
+The version of Flux included in Alpha 14 introduced `null` support.
+Most issues related to the `null` implementation have been fixed, but one known issue remains:
+the `map()` function panics if the first record processed has a `null` value.
+
+---
+
+## v2.0.0-alpha.14 [2019-06-28]
+
+### Features
+- Add `influxd inspect verify-wal` tool (see the sketch after this list).
+- Move to [Flux 0.34.2](/{{< latest "flux" >}}/release-notes/#v0342-2019-06-27) -
+  includes new string functions and initial multi-datasource support with `sql.from()`.
+- Only click save once to save cell.
+- Enable selecting more columns for line visualizations.
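+
+As referenced above, the following is a minimal, hypothetical sketch of running the new WAL verification tool. The data path is a placeholder, and the flag for pointing the tool at a non-default WAL directory is an assumption (later releases expose it as `--wal-path`), so confirm it with `influxd inspect verify-wal -h` for your version.
+
+```sh
+# Verify WAL integrity using the default data directory.
+influxd inspect verify-wal
+
+# Point the tool at a specific WAL directory (flag name is an assumption;
+# check `influxd inspect verify-wal -h` for your version).
+influxd inspect verify-wal --wal-path ~/.influxdbv2/engine/wal
+```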
+ +### UI Improvements +- Draw gauges correctly on HiDPI displays. +- Clamp gauge position to gauge domain. +- Improve display of error messages. +- Remove rendering bottleneck when streaming Flux responses. +- Prevent variable dropdown from clipping. + +--- + +## v2.0.0-alpha.13 [2019-06-13] + +### Features +- Add static templates for system, Docker, Redis, Kubernetes. + +--- + +## v2.0.0-alpha.12 [2019-06-13] + +### Features +- Enable formatting line graph y ticks with binary prefix. +- Add x and y column pickers to graph types. +- Add option to shade area below line graphs. + +### Bug Fixes +- Fix performance regression in graph tooltips. + +--- + +## v2.0.0-alpha.11 [2019-05-31] + +### Bug Fixes +- Correctly check if columnKeys include xColumn in heatmap. + +--- + +## v2.0.0-alpha.10 [2019-05-30] + +### Features +- Add heatmap visualization type. +- Add scatterplot graph visualization type. +- Add description field to tasks. +- Add CLI arguments for configuring session length and renewal. +- Add smooth interpolation option to line graphs. + +### Bug Fixes +- Removed hardcoded bucket for Getting Started with Flux dashboard. +- Ensure map type variables allow for selecting values. +- Generate more idiomatic Flux in query builder. +- Expand tab key presses to two spaces in the Flux editor. +- Prevent dragging of variable dropdowns when dragging a scrollbar inside the dropdown. +- Improve single stat computation. +- Fix crash when opening histogram settings with no data. + +### UI Improvements +- Render checkboxes in query builder tag selection lists. +- Fix jumbled card text in Telegraf configuration wizard. +- Change scrapers in scrapers list to be resource cards. +- Export and download resource with formatted resource name with no spaces. + +--- + +## v2.0.0-alpha.9 [2019-05-01] + +{{% warn %}} +**This will remove all tasks from your InfluxDB v2.0 instance.** + +Before upgrading, [export all existing tasks](/influxdb/v2.3/process-data/manage-tasks/export-task/). After upgrading, [reimport your exported tasks](/influxdb/v2.3/process-data/manage-tasks/create-task/#import-a-task). +{{% /warn %}} + +### Features +- Set autorefresh of dashboard to pause if absolute time range is selected. +- Switch task back end to a more modular and flexible system. +- Add org profile tab with ability to edit organization name. +- Add org name to dashboard page title. +- Add cautioning to bucket renaming. +- Add option to generate all access token in tokens tab. +- Add option to generate read/write token in tokens tab. +- Add new Local Metrics Dashboard template that is created during Quick Start. + +### Bug Fixes +- Fixed scroll clipping found in label editing flow. +- Prevent overlapping text and dot in time range dropdown. +- Updated link in notes cell to a more useful site. +- Show error message when adding line protocol. +- Update UI Flux function documentation. +- Update System template to support math with floats. +- Fix the `window` function documentation. +- Fix typo in the `range` Flux function example. +- Update the `systemTime` function to use `system.time`. + +### UI Improvements +- Add general polish and empty states to Create Dashboard from Template overlay. + +--- + +## v2.0.0-alpha.8 [2019-04-12] + +### Features +- Add the ability to edit token's description. +- Add the option to create a dashboard from a template. +- Add the ability to add labels on variables. +- Add switch organizations dropdown to home navigation menu item. +- Add create org to side nav. 
+- Add "Getting Started with Flux" template. +- Update to Flux v0.25.0. + +### Bug Fixes +- Update shift to timeShift in Flux functions sidebar. + +### UI Improvements +- Update cursor to grab when hovering draggable areas. +- Sync note editor text and preview scrolling. +- Add the ability to create a bucket when creating an organization. + +--- + +## v2.0.0-alpha.7 [2019-03-28] + +### Features +- Insert Flux function near cursor in Flux Editor. +- Enable the use of variables in the Data Explorer and Cell Editor Overlay. +- Add a variable control bar to dashboards to select values for variables. +- Add ability to add variable to script from the side menu. +- Use time range for meta queries in Data Explorer and Cell Editor Overlay. +- Fix screen tearing bug in raw data view. +- Add copy to clipboard button to export overlays. +- Enable copying error messages to the clipboard from dashboard cells. +- Add the ability to update token's status in token list. +- Allow variables to be re-ordered within control bar on a dashboard. +- Add the ability to delete a template. +- Save user preference for variable control bar visibility and default to visible. +- Add the ability to clone a template. +- Add the ability to import a variable. + +### Bug Fixes +- Fix mismatch in bucket row and header. +- Allows user to edit note on cell. +- Fix empty state styles in scrapers in org view. +- Fix bucket creation error when changing retention rules types. +- Fix task creation error when switching schedule types. +- Fix hidden horizontal scrollbars in flux raw data view. +- Fix screen tearing bug in raw data View. +- Fix routing loop. + +### UI Improvements +- Move bucket selection in the query builder to the first card in the list. +- Ensure editor is automatically focused in Note Editor. +- Add ability to edit a template's name. + +--- + +## v2.0.0-alpha.6 [2019-03-15] + +### Release Notes + +{{% warn %}} +We have updated the way we do predefined dashboards to [include Templates](https://github.com/influxdata/influxdb/pull/12532) +in this release which will cause existing Organizations to not have a System +dashboard created when they build a new Telegraf configuration. +In order to get this functionality, remove your existing data and start from scratch. + +_**This will remove all data from your InfluxDB v2.0 instance including time series data.**_ + +###### Linux and macOS +```sh +rm ~/.influxdbv2/influxd.bolt +``` + +Once completed, `v2.0.0-alpha.6` can be started. +{{% /warn %}} + +### Features +- Add ability to import a dashboard. +- Add ability to import a dashboard from organization view. +- Add ability to export a dashboard and a task. +- Add `run` subcommand to `influxd` binary. This is also the default when no subcommand is specified. +- Add ability to save a query as a variable from the Data Explorer. +- Add System template on onboarding. + +### Bug Fixes +- Stop scrollbars from covering text in Flux editor. + +### UI Improvements +- Fine tune keyboard interactions for managing labels from a resource card. + +--- + +## v2.0.0-alpha.5 [2019-03-08] + +{{% warn %}} +This release includes a breaking change to the format in which Time-Structured Merge Tree (TSM) and index data are stored on disk. +_**Existing local data will not be queryable after upgrading to this release.**_ + +Prior to installing this release, remove all storage-engine data from your local InfluxDB 2.x installation. 
To remove only TSM and index data and preserve all other InfluxDB 2.x data (organizations, buckets, settings, etc.),
+run the following command.
+
+###### Linux and macOS
+```sh
+rm -r ~/.influxdbv2/engine
+```
+
+Once completed, InfluxDB v2.0.0-alpha.5 can be started.
+{{% /warn %}}
+
+### Features
+- Add labels to cloned tasks.
+- Add ability to filter resources by clicking a label.
+- Add ability to add a member to an org.
+- Improve representation of TSM tagsets on disk.
+- Add ability to remove a member from an org.
+- Update to Flux v0.21.4.
+
+### Bug Fixes
+- Prevent clipping of code snippets in Firefox.
+- Prevent clipping of cell edit menus in dashboards.
+
+### UI Improvements
+- Make code snippet copy functionality easier to use.
+- Always show live preview in note cell editor.
+- Redesign scraper creation workflow.
+- Show warning in Telegraf and scraper lists when user has no buckets.
+- Streamline label addition, removal, and creation from the dashboards list.
+
+---
+
+## v2.0.0-alpha.4 [2019-02-21]
+
+### Features
+- Add the ability to run a task manually from the tasks page.
+- Add the ability to select a custom time range in explorer and dashboard.
+- Display the version information on the login page.
+- Add the ability to update a variable's name and query.
+- Add labels to cloned dashboard.
+- Add ability to filter resources by label name.
+- Add ability to create or add labels to a resource from labels editor.
+- Update to Flux v0.20.
+
+### Bug Fixes
+- Update the bucket retention policy to update the time in seconds.
+
+### UI Improvements
+- Update the preview in the label overlays to be shorter.
+- Add notifications to scrapers page for created/deleted/updated scrapers.
+- Add notifications to buckets page for created/deleted/updated buckets.
+- Update the admin page to display error for password length.
+
+---
+
+## v2.0.0-alpha.3 [2019-02-15]
+
+### Features
+- Add the ability to name a scraper target.
+- Display scraper name as the first and only updatable column in scrapers list.
+- Add the ability to view runs for a task.
+- Display last completed run for tasks list.
+- Add the ability to view the logs for a specific task run.
+
+### Bug Fixes
+- Update the inline edit for resource names to guard for empty strings.
+- Prevent a new template dashboard from being created on every Telegraf config update.
+- Fix overlapping buttons in Telegraf verify data step.
+
+### UI Improvements
+- Move the download Telegraf config button to view config overlay.
+- Combine permissions for user by type.
+
+---
+
+## v2.0.0-alpha.2 [2019-02-07]
+
+### Features
+- Add instructions button to view `$INFLUX_TOKEN` setup for Telegraf configs.
+- Save the `$INFLUX_TOKEN` environment variable in Telegraf configs.
+- Update Tasks tab on Organizations page to look like the Tasks page.
+- Add view button to view the Telegraf config TOML.
+- Add plugin information step to allow for config naming and configure one plugin at a time.
+- Update Dashboards tab on Organizations page to look like the Dashboards page.
+
+### Bug Fixes
+- Update the System Telegraf Plugin bundle to include the Swap plugin.
+- Revert behavior allowing users to create authorizations on behalf of another user.
+
+### UI Improvements
+- Change the wording for the plugin config form button to "Done."
+- Change the wording for the Collectors configure step button to "Create and Verify."
+- Standardize page loading spinner styles.
+- Show checkbox on "Save As" button in data explorer.
+- Make the collector plugins sidebar visible only in the configure step.
+- Swap retention policies on Create bucket page.
+
+---
+
+## v2.0.0-alpha.1 [2019-01-23]
+
+This is the initial alpha release of InfluxDB 2.0.
diff --git a/content/influxdb/v2.5/reference/release-notes/supported-release.md b/content/influxdb/v2.5/reference/release-notes/supported-release.md
new file mode 100644
index 000000000..b9c1a72b0
--- /dev/null
+++ b/content/influxdb/v2.5/reference/release-notes/supported-release.md
@@ -0,0 +1,36 @@
+---
+title: Supported releases
+description: >
+  InfluxDB releases supported by InfluxData.
+menu:
+  influxdb_2_5_ref:
+    name: Supported releases
+    parent: Release notes
+weight: 204
+related:
+  - /influxdb/v2.5/reference/release-notes
+---
+
+InfluxData provides support for the following major, minor, and maintenance releases:
+
+##### Major (X.y.z)
+
+- Includes new features, enhancements to existing features, and applicable error corrections from all prior releases.
+- InfluxDB supports backward compatibility within a major release.
+
+##### Minor (x.Y.z)
+
+- Includes new minor features, enhancements to existing features, and applicable error corrections from prior minor and maintenance releases.
+
+##### Maintenance (x.y.Z)
+
+- Includes error corrections severely affecting a number of customers that cannot wait for the next major or minor release. Includes applicable error corrections made in prior maintenance releases. Released as needed based on customer feedback and outstanding errors (no predefined schedule).
+- Interim (x.y.z.A): fourth-digit (or beyond) releases are an exception and are delivered only to address specific customer issues. These releases are retired by the customer in a timely manner once the issue has been addressed in a standard release.
+- InfluxData provides Support Services for the current and previous Minor Release (x.Y), including Maintenance Releases (x.y.Z) included in the respective Minor Release.
+- When InfluxData releases a Major Release (X.y.z), InfluxData will provide Support Services for the current and preceding Major Release, including all Maintenance Releases (x.y.Z) included in the respective Minor Release, for at least 12 months from the Major Release.
+
+For more information about supported 2.x releases, see the [InfluxData support policy](https://www.influxdata.com/legal/support-policy/).
+
+{{% note %}}
+The link above contains the full legal description of the supported releases.
+{{% /note %}}
\ No newline at end of file
diff --git a/content/influxdb/v2.5/reference/sample-data.md b/content/influxdb/v2.5/reference/sample-data.md
new file mode 100644
index 000000000..e9f8556f9
--- /dev/null
+++ b/content/influxdb/v2.5/reference/sample-data.md
@@ -0,0 +1,161 @@
+---
+title: Sample data
+description: >
+  Use sample data to familiarize yourself with time series data and InfluxDB.
+  InfluxData provides many sample time series datasets to use with InfluxDB
+  and InfluxDB Cloud.
+aliases:
+  - /influxdb/v2.5/write-data/sample-data/
+menu: influxdb_2_5_ref
+weight: 7
+---
+
+Use sample data to familiarize yourself with time series data and InfluxDB.
+InfluxData provides many sample time series datasets to use with InfluxDB.
+You can also use the [Flux InfluxDB sample package](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/)
+to view, download, and output sample datasets.
+ +- [Air sensor sample data](#air-sensor-sample-data) +- [Bird migration sample data](#bird-migration-sample-data) +- [NOAA sample data](#noaa-sample-data) + - [NOAA NDBC data](#noaa-ndbc-data) + - [NOAA water sample data](#noaa-water-sample-data) +- [USGS Earthquake data](#usgs-earthquake-data) + +## Air sensor sample data + +{{% caption %}} +**Size**: ~600 KB • **Updated**: every 15m +{{% /caption %}} + +Air sensor sample data represents an "Internet of Things" (IoT) use case by simulating +temperature, humidity, and carbon monoxide levels for multiple rooms in a building. + +To download and output the air sensor sample dataset, use the +[`sample.data()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/data/). + +```js +import "influxdata/influxdb/sample" + +sample.data(set: "airSensor") +``` + +#### Companion SQL sensor data +The air sensor sample dataset is paired with a relational SQL dataset with meta +information about sensors in each room. +These two sample datasets are used to demonstrate +[how to join time series data and relational data with Flux](/influxdb/v2.5/query-data/flux/sql/#join-sql-data-with-data-in-influxdb) +in the [Query SQL data sources](/influxdb/v2.5/query-data/flux/sql/) guide. + +Download SQL air sensor data + +## Bird migration sample data + +{{% caption %}} +**Size**: ~1.2 MB • **Updated**: N/A +{{% /caption %}} + +Bird migration sample data is adapted from the +[Movebank: Animal Tracking data set](https://www.kaggle.com/pulkit8595/movebank-animal-tracking) +and represents animal migratory movements throughout 2019. + +To download and output the bird migration sample dataset, use the +[`sample.data()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/data/). + +```js +import "influxdata/influxdb/sample" + +sample.data(set: "birdMigration") +``` + +The bird migration sample dataset is used in the [Work with geo-temporal data](/influxdb/v2.5/query-data/flux/geo/) +guide to demonstrate how to query and analyze geo-temporal data. + +## NOAA sample data + +There are two National Oceanic and Atmospheric Administration (NOAA) datasets +available to use with InfluxDB. + +- [NOAA NDBC data](#noaa-ndbc-data) +- [NOAA water sample data](#noaa-water-sample-data) + +### NOAA NDBC data + +{{% caption %}} +**Size**: ~1.3 MB • **Updated**: every 15m +{{% /caption %}} + +The **NOAA National Data Buoy Center (NDBC)** dataset provides the latest +observations from the NOAA NDBC network of buoys throughout the world. +Observations are updated approximately every 15 minutes. + +To download and output the most recent NOAA NDBC observations, use the +[`sample.data()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/data/). + +```js +import "influxdata/influxdb/sample" + +sample.data(set: "noaa") +``` + +{{% note %}} +#### Store historical NOAA NDBC data + +The **NOAA NDBC sample dataset** only returns the most recent observations; +not historical observations. +To regularly query and store NOAA NDBC observations, add the following as an +[InfluxDB task](/influxdb/v2.5/process-data/manage-tasks/). +Replace `example-org` and `example-bucket` with your organization name and the +name of the bucket to store data in. 
+ +{{% get-shared-text "flux/noaa-ndbc-sample-task.md" %}} +{{% /note %}} + +### NOAA water sample data + +{{% caption %}} +**Size**: ~10 MB • **Updated**: N/A +{{% /caption %}} + +The **NOAA water sample dataset** is static dataset extracted from +[NOAA Center for Operational Oceanographic Products and Services](http://tidesandcurrents.noaa.gov/stations.html) data. +The sample dataset includes 15,258 observations of water levels (ft) collected every six minutes at two stations +(Santa Monica, CA (ID 9410840) and Coyote Creek, CA (ID 9414575)) over the period +from **August 18, 2015** through **September 18, 2015**. + +{{% note %}} +#### Store NOAA water sample data to avoid bandwidth usage +To avoid having to re-download this 10MB dataset every time you run a query, +we recommend that you [create a new bucket](/influxdb/v2.5/organizations/buckets/create-bucket/) +(`noaa`) and write the NOAA sample water data to it. + +```js +import "experimental/csv" + +csv.from(url: "https://influx-testdata.s3.amazonaws.com/noaa.csv") + |> to(bucket: "noaa", org: "example-org") +``` +{{% /note %}} + +The NOAA water sample dataset is used to demonstrate Flux queries in the +[Common queries](/influxdb/v2.5/query-data/common-queries/) and +[Common tasks](/influxdb/v2.5/process-data/common-tasks/) guides. + +## USGS Earthquake data + +{{% caption %}} +**Size**: ~6 MB • **Updated**: every 15m +{{% /caption %}} + +The United States Geological Survey (USGS) earthquake dataset contains observations +collected from USGS seismic sensors around the world over the last week. +Data is updated approximately every 15m. + +To download and output the last week of USGS seismic data, use the +[`sample.data()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/sample/data/). + +```js +import "influxdata/influxdb/sample" + +sample.data(set: "usgs") +``` diff --git a/content/influxdb/v2.5/reference/syntax/_index.md b/content/influxdb/v2.5/reference/syntax/_index.md new file mode 100644 index 000000000..0ad91f0d2 --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/_index.md @@ -0,0 +1,18 @@ +--- +title: InfluxDB syntaxes +description: > + InfluxDB uses a handful of languages and syntaxes to perform tasks such as + writing, querying, processing, and deleting data. +weight: 5 +menu: + influxdb_2_5_ref: + name: Syntax +influxdb/v2.5/tags: [syntax] +--- + +InfluxDB uses a handful of languages and syntaxes to perform tasks such as +writing, querying, processing, and deleting data. +The following articles provide information about the different syntaxes used with +InfluxDB and the contexts in which they're used: + +{{< children >}} diff --git a/content/influxdb/v2.5/reference/syntax/annotated-csv/_index.md b/content/influxdb/v2.5/reference/syntax/annotated-csv/_index.md new file mode 100644 index 000000000..48f1f1003 --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/annotated-csv/_index.md @@ -0,0 +1,311 @@ +--- +title: Annotated CSV +description: > + InfluxDB and Flux return query results in annotated CSV format. + You can also read annotated CSV directly from Flux with the `csv.from()` function, + write data to InfluxDB using annotated CSV and the `influx write` command, or upload a CSV file in the UI. +weight: 103 +menu: + influxdb_2_5_ref: + parent: Syntax +influxdb/v2.5/tags: [csv, syntax] +related: + - /{{< latest "flux" >}}/stdlib/csv/from/ + - /influxdb/v2.5/reference/syntax/annotated-csv/extended/ +--- + +InfluxDB and Flux return query results in annotated CSV format. 
+You can also read annotated CSV directly from Flux with the [`csv.from()` function](/{{< latest "flux" >}}/stdlib/csv/from/), write data to InfluxDB using annotated CSV and the `influx write` command, or [upload a CSV file](/influxdb/cloud/write-data/no-code/load-data/#load-data-by-uploading-a-csv-or-line-protocol-file) in the UI. + +CSV tables must be encoded in UTF-8 and Unicode Normal Form C as defined in [UAX15](http://www.unicode.org/reports/tr15/). +InfluxDB removes carriage returns before newline characters. + +## Examples + +In this topic, you'll find examples of valid CSV syntax for responses to the following query: + +```js +from(bucket:"mydb/autogen") + |> range(start:2018-05-08T20:50:00Z, stop:2018-05-08T20:51:00Z) + |> group(columns:["_start","_stop", "region", "host"]) + |> yield(name:"my-result") +``` + +## CSV response format + +Flux supports encodings listed below. + +### Tables + +A table may have the following rows and columns. + +#### Rows + +- **Annotation rows**: describe column properties. + +- **Header row**: defines column labels (one header row per table). + +- **Record row**: describes data in the table (one record per row). + +##### Example + +Encoding of a table with and without a header row. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Header row](#) +[Without header row](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```sh +result,table,_start,_stop,_time,region,host,_value +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```sh +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +#### Columns + +In addition to the data columns, a table may include the following columns: + +- **Annotation column**: Only used in annotation rows. Always the first column. + Displays the name of an annotation. Value can be empty or a supported [annotation](#annotations). + You'll notice a space for this column for the entire length of the table, + so rows appear to start with `,`. + +- **Result column**: Contains the name of the result specified by the query. + +- **Table column**: Contains a unique ID for each table in a result. + +### Multiple tables and results + +If a file or data stream contains multiple tables or results, the following requirements must be met: + +- A table column indicates which table a row belongs to. +- All rows in a table are contiguous. +- An empty row delimits a new table boundary in the following cases: + - Between tables in the same result that do not share a common table schema. + - Between concatenated CSV files. +- Each new table boundary starts with new annotation and header rows. + +##### Example + +Encoding of two tables in the same result with the same schema (header row) and different schema. 
+ +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Same schema](#) +[Different schema](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```sh +result,table,_start,_stop,_time,region,host,_value +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 +my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 +my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,west,A,62.73 +my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,west,B,12.83 +my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,C,51.62 + +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```sh +,result,table,_start,_stop,_time,region,host,_value +,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 +,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 +,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 + +,result,table,_start,_stop,_time,location,device,min,max +,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,USA,5825,62.73,68.42 +,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,USA,2175,12.83,56.12 +,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,USA,6913,51.62,54.25 +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +## Dialect options + +Flux supports the following dialect options for `text/csv` format. + +| Option | Description | Default | +| :-------- | :--------- |:-------:| +| **header** | If true, the header row is included. | `true` | +| **delimiter** | Character used to delimit columns. | `,` | +| **quoteChar** | Character used to quote values containing the delimiter. | `"` | +| **annotations** | List of annotations to encode (datatype, group, or default). | `empty` | +| **commentPrefix** | String prefix to identify a comment. Always added to annotations. | `#` | + +## Annotations + +Annotation rows describe column properties, and start with `#` (or commentPrefix value). +The first column in an annotation row always contains the annotation name. +Subsequent columns contain annotation values as shown in the table below. + +| Annotation name | Values | Description | +|:-------- |:--------- | :------- | +| **datatype** | a [data type](#data-types) or [line protocol element](#line-protocol-elements) | Describes the type of data or which line protocol element the column represents. | +| **group** | boolean flag `true` or `false` | Indicates the column is part of the group key. | +| **default** | a value of the column's data type | Value to use for rows with an empty value. | + + +{{% note %}} +To encode a table with its [group key](/influxdb/v2.5/reference/glossary/#group-key), +the `datatype`, `group`, and `default` annotations must be included. +If a table has no rows, the `default` annotation provides the group key values. 
+{{% /note %}}
+
+## Data types
+
+| Datatype | Flux type | Description |
+| :-------- | :--------- | :---------- |
+| boolean | bool | "true" or "false" |
+| unsignedLong | uint | unsigned 64-bit integer |
+| long | int | signed 64-bit integer |
+| double | float | IEEE-754 64-bit floating-point number |
+| string | string | UTF-8 encoded string |
+| base64Binary | bytes | base64 encoded sequence of bytes as defined in RFC 4648 |
+| dateTime | time | instant in time, may be followed with a colon (`:`) and a description of the format (number, RFC3339, RFC3339Nano) |
+| duration | duration | length of time represented as an unsigned 64-bit integer number of nanoseconds |
+
+## Line protocol elements
+The `datatype` annotation accepts [data types](#data-types) and **line protocol elements**.
+Line protocol elements identify how columns are converted into line protocol when using the
+[`influx write` command](/influxdb/v2.5/reference/cli/influx/write/) to write annotated CSV to InfluxDB.
+
+| Line protocol element | Description |
+|:--------------------- |:----------- |
+| `measurement` | column value is the measurement |
+| `field` _(default)_ | column header is the field key, column value is the field value |
+| `tag` | column header is the tag key, column value is the tag value |
+| `time` | column value is the timestamp _(alias for `dateTime`)_ |
+| `ignore` or `ignored` | column is ignored and not included in line protocol |
+
+### Mixing data types and line protocol elements
+Columns with [data types](#data-types) (other than `dateTime`) in the
+`#datatype` annotation are treated as **fields** when converted to line protocol.
+Columns without a specified data type default to `field` when converted to line protocol
+and **column values are left unmodified** in line protocol.
+_See an example [below](#example-of-mixing-data-types-line-protocol-elements) and
+[line protocol data types and format](/influxdb/v2.5/reference/syntax/line-protocol/#data-types-and-format)._
+
+### Time columns
+A column with a `time` or `dateTime` `#datatype` annotation is used as the timestamp
+when converted to line protocol.
+If there are multiple `time` or `dateTime` columns, the last column (on the right)
+is used as the timestamp in line protocol.
+Other time columns are ignored and the `influx write` command outputs a warning.
+
+Time column values should be **Unix timestamps** (in an [accepted timestamp precision](/influxdb/v2.5/write-data/#timestamp-precision)),
+**RFC3339**, or **RFC3339Nano**.
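+
+For example, in the following annotated CSV (column labels and values are illustrative),
+the right-most `dateTime` column (`time`) becomes the line protocol timestamp and the
+`created` column is ignored with a warning:
+
+```
+#datatype measurement,tag,double,dateTime:RFC3339,dateTime:RFC3339
+m,host,usage_user,created,time
+cpu,host1,2.7,2020-01-01T00:00:00Z,2020-01-01T00:00:10Z
+```
+
+Resulting line protocol:
+
+```
+cpu,host=host1 usage_user=2.7 1577836810000000000
+```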
+
+##### Example line protocol elements in datatype annotation
+```
+#datatype measurement,tag,tag,field,field,ignored,time
+m,cpu,host,time_steal,usage_user,nothing,time
+cpu,cpu1,host1,0,2.7,a,1482669077000000000
+cpu,cpu1,host2,0,2.2,b,1482669087000000000
+```
+
+Resulting line protocol:
+
+```
+cpu,cpu=cpu1,host=host1 time_steal=0,usage_user=2.7 1482669077000000000
+cpu,cpu=cpu1,host=host2 time_steal=0,usage_user=2.2 1482669087000000000
+```
+
+##### Example of mixing data types line protocol elements
+```
+#datatype measurement,tag,string,double,boolean,long,unsignedLong,duration,dateTime
+#default test,annotatedDatatypes,,,,,,
+m,name,s,d,b,l,ul,dur,time
+,,str1,1.0,true,1,1,1ms,1
+,,str2,2.0,false,2,2,2us,2020-01-11T10:10:10Z
+```
+
+Resulting line protocol:
+
+```
+test,name=annotatedDatatypes s="str1",d=1,b=true,l=1i,ul=1u,dur=1000000i 1
+test,name=annotatedDatatypes s="str2",d=2,b=false,l=2i,ul=2u,dur=2000i 1578737410000000000
+```
+
+## Annotated CSV in Flux
+Flux requires all annotation and header rows in annotated CSV.
+The example below illustrates how to use the [`csv.from()` function](/{{< latest "flux" >}}/stdlib/csv/from/)
+to read annotated CSV in Flux:
+
+```js
+import "csv"
+
+csvData = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double,string,string
+#group,false,false,true,true,false,true,false,false,true,true
+#default,,,,,,,,,,
+,result,table,_start,_stop,_time,region,host,_value,_measurement,_field
+,,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43,cpu,usage_system
+,,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25,cpu,usage_system
+,,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62,cpu,usage_system
+,,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,west,A,62.73,cpu,usage_system
+,,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,west,B,12.83,cpu,usage_system
+,,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,C,51.62,cpu,usage_system
+"
+
+csv.from(csv: csvData)
+```
+
+{{% warn %}}
+Flux only supports [data types](#data-types) in the `#datatype` annotation.
+It **does not** support [line protocol elements](#line-protocol-elements).
+{{% /warn %}}
+
+## Errors
+
+If an error occurs during execution, the query returns a table with:
+
+- An error column that contains an error message.
+- A reference column with a unique reference code to identify more information about the error.
+- A second row with error properties.
+
+If an error occurs:
+
+- Before results materialize, the HTTP status code indicates an error. Error details are encoded in the CSV table.
+- After partial results are sent to the client, the error is encoded as the next table and remaining results are discarded. In this case, the HTTP status code remains 200 OK.
+ +##### Example + +Encoding for an error with the datatype annotation: + +``` +#datatype,string,long +,error,reference +,Failed to parse query,897 +``` + +Encoding for an error that occurs after a valid table has been encoded: + +``` +#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double +,result,table,_start,_stop,_time,region,host,_value +,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,west,A,62.73 +,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,west,B,12.83 +,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,C,51.62 +``` + +``` +#datatype,string,long +,error,reference,query terminated: reached maximum allowed memory limits,576 +``` diff --git a/content/influxdb/v2.5/reference/syntax/annotated-csv/extended.md b/content/influxdb/v2.5/reference/syntax/annotated-csv/extended.md new file mode 100644 index 000000000..1abd2a8c8 --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/annotated-csv/extended.md @@ -0,0 +1,396 @@ +--- +title: Extended annotated CSV +description: > + Extended annotated CSV provides additional annotations and options that specify + how CSV data should be converted to [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) + and written to InfluxDB. +menu: + influxdb_2_5_ref: + name: Extended annotated CSV + parent: Annotated CSV +weight: 201 +influxdb/v2.5/tags: [csv, syntax, write] +related: + - /influxdb/v2.5/write-data/developer-tools/csv/ + - /influxdb/v2.5/reference/cli/influx/write/ + - /influxdb/v2.5/reference/syntax/line-protocol/ + - /influxdb/v2.5/reference/syntax/annotated-csv/ +--- + +**Extended annotated CSV** provides additional annotations and options that specify +how CSV data should be converted to [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) +and written to InfluxDB. +InfluxDB uses the [`csv2lp` library](https://github.com/influxdata/influxdb/tree/master/pkg/csv2lp) +to convert CSV into line protocol. +Extended annotated CSV supports all [Annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/) +annotations. + +{{% warn %}} +The Flux [`csv.from` function](/{{< latest "flux" >}}/stdlib/csv/from/) only supports +[annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/), not extended annotated CSV. +{{% /warn %}} + +To write data to InfluxDB, line protocol must include the following: + +- [measurement](/influxdb/v2.5/reference/syntax/line-protocol/#measurement) +- [field set](/influxdb/v2.5/reference/syntax/line-protocol/#field-set) +- [timestamp](/influxdb/v2.5/reference/syntax/line-protocol/#timestamp) _(Optional but recommended)_ +- [tag set](/influxdb/v2.5/reference/syntax/line-protocol/#tag-set) _(Optional)_ + +Extended CSV annotations identify the element of line protocol a column represents. + +## CSV Annotations +Extended annotated CSV extends and adds the following annotations: + +- [datatype](#datatype) +- [constant](#constant) +- [timezone](#timezone) +- [concat](#concat) + +### datatype +Use the `#datatype` annotation to specify the [line protocol element](/influxdb/v2.5/reference/syntax/line-protocol/#elements-of-line-protocol) +a column represents. +To explicitly define a column as a **field** of a specific data type, use the field +type in the annotation (for example: `string`, `double`, `long`, etc.). 
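+
+For example, the following annotation row (the column labels and values here are
+illustrative) maps a five-column CSV to a measurement, a tag, two typed fields, and a
+timestamp; the table below lists each value the annotation accepts:
+
+```
+#datatype measurement,tag,double,long,dateTime:RFC3339
+m,host,used_percent,load,time
+mem,host1,64.23,2,2020-01-01T00:00:00Z
+```
+
+Resulting line protocol:
+
+```
+mem,host=host1 used_percent=64.23,load=2i 1577836800000000000
+```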
+ +| Data type | Resulting line protocol | +|:---------- |:----------------------- | +| [measurement](#measurement) | Column is the **measurement** | +| [tag](#tag) | Column is a **tag** | +| [dateTime](#datetime) | Column is the **timestamp** | +| [field](#field) | Column is a **field** | +| [ignored](#ignored) | Column is ignored | +| [string](#string) | Column is a **string field** | +| [double](#double) | Column is a **float field** | +| [long](#long) | Column is an **integer field** | +| [unsignedLong](#unsignedlong) | Column is an **unsigned integer field** | +| [boolean](#boolean) | Column is a **boolean field** | + +#### measurement +Indicates the column is the **measurement**. + +#### tag +Indicates the column is a **tag**. +The **column label** is the **tag key**. +The **column value** is the **tag value**. + +#### dateTime +Indicates the column is the **timestamp**. +`time` is an alias for `dateTime`. +If the [timestamp format](#supported-timestamp-formats) includes a time zone, +the parsed timestamp includes the time zone offset. +By default, all timestamps are UTC. +You can also use the [`#timezone` annotation](#timezone) to adjust timestamps to +a specific time zone. + +{{% note %}} +There can only be **one** `dateTime` column. +{{% /note %}} + +The `influx write` command converts timestamps to [Unix timestamps](/influxdb/v2.5/reference/glossary/#unix-timestamp). +Append the timestamp format to the `dateTime` datatype with (`:`). + +```csv +#datatype dateTime:RFC3339 +#datatype dateTime:RFC3339Nano +#datatype dateTime:number +#datatype dateTime:2006-01-02 +``` + +##### Supported timestamp formats + +| Timestamp format | Description | Example | +|:---------------- |:----------- |:------- | +| **RFC3339** | RFC3339 timestamp | `2020-01-01T00:00:00Z` | +| **RFC3339Nano** | RFC3339 timestamp | `2020-01-01T00:00:00.000000000Z` | +| **number** | Unix timestamp | `1577836800000000000` | + +{{% note %}} +If using the `number` timestamp format and timestamps are **not in nanoseconds**, +use the [`influx write --precision` flag](/influxdb/v2.5/reference/cli/influx/write/#flags) +to specify the [timestamp precision](/influxdb/v2.5/reference/glossary/#precision). +{{% /note %}} + +##### Custom timestamp formats +To specify a custom timestamp format, use timestamp formats as described in the +[Go time package](https://golang.org/pkg/time). +For example: `2020-01-02`. + +#### field +Indicates the column is a **field**. +The **column label** is the **field key**. +The **column value** is the **field value**. + +{{% note %}} +With the `field` datatype, field values are copies **as-is** to line protocol. +For information about line protocol values and how they are written to InfluxDB, +see [Line protocol data types and formats](/influxdb/v2.5/reference/syntax/line-protocol/#data-types-and-format). +We generally recommend specifying the [field type](#field-types) in annotations. +{{% /note %}} + +#### ignored +The column is ignored and not written to InfluxDB. + +#### Field types +The column is a **field** of a specified type. +The **column label** is the **field key**. +The **column value** is the **field value**. + +- [string](#string) +- [double](#double) +- [long](#long) +- [unsignedLong](#unsignedlong) +- [boolean](#boolean) + +##### string +Column is a **[string](/influxdb/v2.5/reference/glossary/#string) field**. + +##### double +Column is a **[float](/influxdb/v2.5/reference/glossary/#float) field**. 
+By default, InfluxDB expects float values that use a period (`.`) to separate the
+fraction from the whole number.
+If column values use other separators, such as commas (`,`) to visually
+separate large numbers into groups, specify the following **float separators**:
+
+- **fraction separator**: Separates the fraction from the whole number.
+- **ignored separator**: Visually separates the whole number into groups but is ignored
+  when parsing the float value.
+
+Use the following syntax to specify **float separators**:
+
+```sh
+# Syntax
+<fraction separator><ignored separator>
+
+# Example
+.,
+
+# With the float separators above
+# 1,200,000.15 => 1200000.15
+```
+
+Append **float separators** to the `double` datatype annotation with a colon (`:`).
+For example:
+
+```
+#datatype "double:.,"
+```
+
+{{% note %}}
+If your **float separators** include a comma (`,`), wrap the column annotation in double
+quotes (`""`) to prevent the comma from being parsed as a column separator or delimiter.
+You can also [define a custom column separator](#define-custom-column-separator).
+{{% /note %}}
+
+##### long
+Column is an **[integer](/influxdb/v2.5/reference/glossary/#integer) field**.
+If column values contain separators such as periods (`.`) or commas (`,`), specify
+the following **integer separators**:
+
+- **fraction separator**: Separates the fraction from the whole number.
+  _**Integer values are truncated at the fraction separator when converted to line protocol.**_
+- **ignored separator**: Visually separates the whole number into groups but is ignored
+  when parsing the integer value.
+
+Use the following syntax to specify **integer separators**:
+
+```sh
+# Syntax
+<fraction separator><ignored separator>
+
+# Example
+.,
+
+# With the integer separators above
+# 1,200,000.00 => 1200000i
+```
+
+Append **integer separators** to the `long` datatype annotation with a colon (`:`).
+For example:
+
+```
+#datatype "long:.,"
+```
+
+{{% note %}}
+If your **integer separators** include a comma (`,`), wrap the column annotation in double
+quotes (`""`) to prevent the comma from being parsed as a column separator or delimiter.
+You can also [define a custom column separator](#define-custom-column-separator).
+{{% /note %}}
+
+##### unsignedLong
+Column is an **[unsigned integer (uinteger)](/influxdb/v2.5/reference/glossary/#unsigned-integer) field**.
+If column values contain separators such as periods (`.`) or commas (`,`), specify
+the following **uinteger separators**:
+
+- **fraction separator**: Separates the fraction from the whole number.
+  _**Uinteger values are truncated at the fraction separator when converted to line protocol.**_
+- **ignored separator**: Visually separates the whole number into groups but is ignored
+  when parsing the uinteger value.
+
+Use the following syntax to specify **uinteger separators**:
+
+```sh
+# Syntax
+<fraction separator><ignored separator>
+
+# Example
+.,
+
+# With the uinteger separators above
+# 1,200,000.00 => 1200000u
+```
+
+Append **uinteger separators** to the `unsignedLong` datatype annotation with a colon (`:`).
+For example:
+
+```
+#datatype "unsignedLong:.,"
+```
+
+{{% note %}}
+If your **uinteger separators** include a comma (`,`), wrap the column annotation in double
+quotes (`""`) to prevent the comma from being parsed as a column separator or delimiter.
+You can also [define a custom column separator](#define-custom-column-separator).
+{{% /note %}}
+
+##### boolean
+Column is a **[boolean](/influxdb/v2.5/reference/glossary/#boolean) field**.
+If column values are not [supported boolean values](/influxdb/v2.5/reference/syntax/line-protocol/#boolean), +specify the **boolean format** with the following syntax: + +```sh +# Syntax +: + +# Example +y,Y,1:n,N,0 + +# With the boolean format above +# y => true, Y => true, 1 => true +# n => false, N => false, 0 => false +``` + +Append the **boolean format** to the `boolean` datatype annotation with a colon (`:`). +For example: + +``` +#datatype "boolean:y,Y:n,N" +``` + +{{% note %}} +If your **boolean format** contains commas (`,`), wrap the column annotation in double +quotes (`""`) to prevent the comma from being parsed as a column separator or delimiter. +You can also [define a custom column separator](#define-custom-column-separator). +{{% /note %}} + +### constant +Use the `#constant` annotation to define a constant column label and value for each row. +The `#constant` annotation provides a way to supply +[line protocol elements](/influxdb/v2.5/reference/syntax/line-protocol/#elements-of-line-protocol) +that don't exist in the CSV data. + +Use the following syntax to define constants: + +``` +#constant ,, +``` + +To provide multiple constants, include each `#constant` annotations on a separate line. + +``` +#constant measurement,m +#constant tag,dataSource,csv +``` + +{{% note %}} +For constants with `measurement` and `dateTime` datatypes, the second value in +the constant definition is the **column-value**. +{{% /note %}} + +### timezone +Use the `#timezone` annotation to update timestamps to a specific timezone. +By default, timestamps are parsed as UTC. +Use the `±HHmm` format to specify the timezone offset relative to UTC. + +### strict mode +Use the `:strict` keyword to indicate a loss of precision when parsing `long` or `unsignedLong` data types. +Turn on strict mode by using a column data type that ends with `strict`, such as `long:strict`. +When parsing `long` or `unsignedLong` value from a string value with fraction digits, the whole CSV row fails when in a strict mode. +A warning is printed when not in a strict mode, saying `line x: column y: '1.2' truncated to '1' to fit into long data type`. +For more information on strict parsing, see the [package documentation](https://github.com/influxdata/influxdb/tree/master/pkg/csv2lp). + +##### Timezone examples +| Timezone | Offset | +|:-------- | ------: | +| US Mountain Daylight Time | `-0600` | +| Central European Summer Time | `+0200` | +| Australia Eastern Standard Time | `+1000` | +| Apia Daylight Time | `+1400` | + +##### Timezone annotation example +``` +#timezone -0600 +``` + +### concat + +The `#concat` annotation adds a new column that is concatenated from existing columns according to bash-like string interpolation literal with variables referencing existing column labels. + +For example: + +``` +#concat,string,fullName,${firstName} ${lastName} +``` + +This is especially useful when constructing a timestamp from multiple columns. +For example, the following annotation will combine the given CSV columns into a timestamp: + +``` +#concat,dateTime:2006-01-02,${Year}-${Month}-${Day} + +Year,Month,Day,Hour,Minute,Second,Tag,Value +2020,05,22,00,00,00,test,0 +2020,05,22,00,05,00,test,1 +2020,05,22,00,10,00,test,2 +``` + +## Define custom column separator +If columns are delimited using a character other than a comma, use the `sep` +keyword to define a custom separator **in the first line of your CSV file**. + +``` +sep=; +``` + +## Annotation shorthand +Extended annotated CSV supports **annotation shorthand**. 
+Include the column label, datatype, and _(optional)_ default value in each column +header row using the following syntax: + +``` +|| +``` + +##### Example annotation shorthand +``` +m|measurement,location|tag|Hong Kong,temp|double,pm|long|0,time|dateTime:RFC3339 +weather,San Francisco,51.9,38,2020-01-01T00:00:00Z +weather,New York,18.2,,2020-01-01T00:00:00Z +weather,,53.6,171,2020-01-01T00:00:00Z +``` + +##### The shorthand explained +- The `m` column represents the **measurement** and has no default value. +- The `location` column is a **tag** with the default value, `Hong Kong`. +- The `temp` column is a **field** with **float** (`double`) values and no default value. +- The `pm` column is a **field** with **integer** (`long`) values and a default of `0`. +- The `time` column represents the **timestamp**, uses the **RFC3339** timestamp format, + and has no default value. + +##### Resulting line protocol +``` +weather,location=San\ Francisco temp=51.9,pm=38i 1577836800000000000 +weather,location=New\ York temp=18.2,pm=0i 1577836800000000000 +weather,location=Hong\ Kong temp=53.6,pm=171i 1577836800000000000 +``` diff --git a/content/influxdb/v2.5/reference/syntax/delete-predicate.md b/content/influxdb/v2.5/reference/syntax/delete-predicate.md new file mode 100644 index 000000000..f7e9dac76 --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/delete-predicate.md @@ -0,0 +1,105 @@ +--- +title: Delete predicate syntax +list_title: Delete predicate +description: > + InfluxDB uses an InfluxQL-like predicate syntax to determine what data points to delete. +menu: + influxdb_2_5_ref: + parent: Syntax + name: Delete predicate +weight: 104 +influxdb/v2.5/tags: [syntax, delete] +related: + - /influxdb/v2.5/write-data/delete-data/ + - /influxdb/v2.5/reference/cli/influx/delete/ +--- + +InfluxDB uses an InfluxQL-like predicate syntax to determine what data +[points](/influxdb/v2.5/reference/glossary/#point) to delete. +InfluxDB uses the delete predicate to evaluate the [series keys](/influxdb/v2.5/reference/glossary/#series-key) +of points in the time range specified in the delete request. +Points with series keys that evaluate to `true` for the given predicate are deleted. +Points with series keys that evaluate to `false` are preserved. + +A delete predicate is comprised of one or more [predicate expressions](/influxdb/v2.5/reference/glossary/#predicate-expression). +The left operand of the predicate expression is the column name. +The right operand is the column value. +Operands are compared using [comparison operators](#comparison-operators). +Use [logical operators](#logical-operators) to combine two or more predicate expressions. + +##### Example delete predicate +```sql +key1="value1" AND key2="value" +``` + +{{% warn %}} +#### Column limitations when deleting data +**InfluxDB {{< current-version >}}** supports deleting data by any column or tag +_**except**_ the following: + +- `_time` +- {{% oss-only %}}`_field`{{% /oss-only %}} +- `_value` + +{{% oss-only %}} + +_InfluxDB {{< current-version >}} does not support deleting data **by field**._ + +{{% /oss-only %}} + +{{% /warn %}} + +## Logical operators +Logical operators join two or more predicate expressions. + +| Operator | Description | +|:-------- |:----------- | +| `AND` | Both left and right operands must be `true` for the expression to be `true`. | + +## Comparison operators +Comparison operators compare left and right operands and return `true` or `false`. 
+ +| Operator | Description | Example | Result | +|:-------- |:----------- |:-------: |:------:| +| `=` | Equal to | `"abc"="abc"` | `true` | + +## Delete predicate examples + +- [Delete points by measurement](#delete-points-by-measurement) +- {{% cloud-only %}}[Delete points by field](#delete-points-by-field){{% /cloud-only %}} +- [Delete points by tag set](#delete-points-by-tag-set) + +### Delete points by measurement +The following will delete points in the `sensorData` measurement: + +```sql +_measurement="sensorData" +``` + +{{% cloud-only %}} + +### Delete points by field +The following will delete points with the `temperature` field: + +```sql +_field="temperature" +``` + +{{% /cloud-only %}} + +### Delete points by tag set +The following will delete points from the `prod-1.4` host in the `us-west` region: + +```sql +host="prod-1.4" AND region="us-west" +``` + +## Limitations +The delete predicate syntax has the following limitations. + +- Delete predicates do not support regular expressions. +- Delete predicates do not support the `OR` logical operator. +- Delete predicates only support equality (`=`), not inequality (`!=`). +- Delete predicates can use any column or tag **except** `_time` + {{% oss-only %}}, `_field`, {{% /oss-only %}}or `_value`. + \ No newline at end of file diff --git a/content/influxdb/v2.5/reference/syntax/flux/_index.md b/content/influxdb/v2.5/reference/syntax/flux/_index.md new file mode 100644 index 000000000..bed772d8b --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/flux/_index.md @@ -0,0 +1,29 @@ +--- +title: Flux syntax +list_title: Flux +description: > + Flux is a functional data scripting language designed for querying, analyzing, and acting on data. +menu: + influxdb_2_5_ref: + parent: Syntax + name: Flux + identifier: flux-syntax +weight: 101 +influxdb/v2.5/tags: [syntax, flux] +--- + +Flux is a functional data scripting language designed for querying, analyzing, and acting on data. + +## Flux documentation +View the [Flux documentation](/{{< latest "flux" >}}/) for more information about +the Flux syntax. + +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) +- [Flux standard library](/{{< latest "flux" >}}/stdlib/) +- [Flux language specification](/{{< latest "flux" >}}/spec/) + +--- + +**The following provide information about Flux and InfluxDB:** + +{{< children >}} diff --git a/content/influxdb/v2.5/reference/syntax/flux/flux-vs-influxql.md b/content/influxdb/v2.5/reference/syntax/flux/flux-vs-influxql.md new file mode 100644 index 000000000..591dca03a --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/flux/flux-vs-influxql.md @@ -0,0 +1,356 @@ +--- +title: Flux vs InfluxQL +description: > + Flux is an alternative to InfluxQL and other SQL-like query languages for querying and analyzing data. + Learn about what's possible with Flux and how Flux compares to InfluxQL. +aliases: + - /influxdb/v2.5/reference/flux/flux-vs-influxql/ +menu: + influxdb_2_5_ref: + name: Flux vs InfluxQL + parent: flux-syntax + weight: 105 +--- + +Flux is an alternative to [InfluxQL](/influxdb/v2.5/query-data/influxql/) +and other SQL-like query languages for querying and analyzing data. +Flux uses functional language patterns that overcome many InfluxQL limitations. 
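+
+For example, the following Flux query (using the hypothetical bucket `example-bucket`)
+covers roughly the same ground as an InfluxQL
+`SELECT MEAN("usage_user") FROM "cpu" WHERE time > now() - 1h GROUP BY time(5m)`
+statement, but does so by piping data through composable functions:
+
+```js
+from(bucket: "example-bucket")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_user")
+    |> aggregateWindow(every: 5m, fn: mean)
+```
+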
+Check out the following distinctions between Flux and InfluxQL: + +- [Tasks possible with Flux](#tasks-possible-with-flux) +- [InfluxQL and Flux parity](#influxql-and-flux-parity) + +## Tasks possible with Flux + +- [Joins](#joins) +- [Math across measurements](#math-across-measurements) +- [Sort by tags](#sort-by-tags) +- [Group by any column](#group-by-any-column) +- [Window by calendar months and years](#window-by-calendar-months-and-years) +- [Work with multiple data sources](#work-with-multiple-data-sources) +- [DatePart-like queries](#datepart-like-queries) +- [Pivot](#pivot) +- [Histograms](#histograms) +- [Covariance](#covariance) +- [Cast booleans to integers](#cast-booleans-to-integers) +- [String manipulation and data shaping](#string-manipulation-and-data-shaping) +- [Work with geo-temporal data](#work-with-geo-temporal-data) + +### Joins +InfluxQL has never supported joins. Although you can use a join in a [TICKscript](/{{< latest "kapacitor" >}}/tick/introduction/), +TICKscript's join capabilities are limited. +Flux's [`join()` function](/{{< latest "flux" >}}/stdlib/universe/join/) lets you +join data **from any bucket, any measurement, and on any columns** as long as +each data set includes the columns to join on. + +```js +dataStream1 = from(bucket: "example-bucket1") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "network" and r._field == "bytes-transferred") + +dataStream2 = from(bucket: "example-bucket2") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "httpd" and r._field == "requests-per-sec") + +join(tables: {d1: dataStream1, d2: dataStream2}, on: ["_time", "_stop", "_start", "host"]) +``` + +_For an in-depth walkthrough of using the `join()` function, see [how to join data with Flux](/influxdb/v2.5/query-data/flux/join/)._ + +### Math across measurements +Being able to perform joins across measurements lets you calculate +data from separate measurements. +The example below takes data from two measurements, `mem` and `processes`, +joins them, and then calculates the average amount of memory used per running process: + +```js +// Memory used (in bytes) +memUsed = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used") + +// Total processes running +procTotal = from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "processes" and r._field == "total") + +// Join memory used with total processes to calculate +// the average memory (in MB) used for running processes. +join(tables: {mem: memUsed, proc: procTotal}, on: ["_time", "_stop", "_start", "host"]) + |> map(fn: (r) => ({_time: r._time, _value: r._value_mem / r._value_proc / 1000000})) +``` + +### Sort by tags +InfluxQL's sorting capabilities only let you control the +sort order of `time` using the `ORDER BY time` clause. +The Flux [`sort()` function](/{{< latest "flux" >}}/stdlib/universe/sort) +sorts records based on a list of columns. +Depending on the column type, Flux sorts records lexicographically, numerically, or chronologically. + +```js +from(bucket: "example-bucket") + |> range(start: -12h) + |> filter(fn: (r) => r._measurement == "system" and r._field == "uptime") + |> sort(columns: ["region", "host", "_value"]) +``` + +### Group by any column +InfluxQL lets you group by tags or time intervals only. +Flux lets you group data by any column, including `_value`. 
+Use the Flux [`group()` function](/{{< latest "flux" >}}/stdlib/universe/group/) +to define which columns to group data by. + +```js +from(bucket:"example-bucket") + |> range(start: -12h) + |> filter(fn: (r) => r._measurement == "system" and r._field == "uptime" ) + |> group(columns:["host", "_value"]) +``` + +### Window by calendar months and years +InfluxQL does not support windowing data by calendar months and years due to their varied lengths. +Flux supports calendar month and year duration units (`1mo`, `1y`) and lets you +window and aggregate data by calendar month and year. + +```js +from(bucket:"example-bucket") + |> range(start:-1y) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent" ) + |> aggregateWindow(every: 1mo, fn: mean) +``` + +### Work with multiple data sources +InfluxQL can only query data stored in InfluxDB. +Flux can query data from other data sources such as CSV, PostgreSQL, MySQL, Google BigTable, and more. +Join that data with data in InfluxDB to enrich query results. + +- [Flux CSV package](/{{< latest "flux" >}}/stdlib/csv/) +- [Flux SQL package](/{{< latest "flux" >}}/stdlib/sql/) +- [Flux BigTable package](/{{< latest "flux" >}}/stdlib/experimental/bigtable/) + + +```js +import "csv" +import "sql" + +csvData = csv.from(csv: rawCSV) + +sqlData = sql.from( + driverName: "postgres", + dataSourceName: "postgresql://user:password@localhost", + query: "SELECT * FROM example_table", +) + +data = from(bucket: "example-bucket") + |> range(start: -24h) + |> filter(fn: (r) => r._measurement == "sensor") + +auxData = join(tables: {csv: csvData, sql: sqlData}, on: ["sensor_id"]) +enrichedData = join(tables: {data: data, aux: auxData}, on: ["sensor_id"]) + +enrichedData + |> yield(name: "enriched_data") +``` + +_For an in-depth walkthrough of querying SQL data, see [Query SQL data sources](/influxdb/v2.5/query-data/flux/sql/)._ + +### DatePart-like queries +InfluxQL doesn't support DatePart-like queries that only return results during specified hours of the day. +The Flux [`hourSelection` function](/{{< latest "flux" >}}/stdlib/universe/hourselection/) +returns only data with time values in a specified hour range. + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r.cpu == "cpu-total") + |> hourSelection(start: 9, stop: 17) +``` + +### Pivot +Pivoting data tables isn't supported in InfluxQL. +Use the Flux [`pivot()` function](/{{< latest "flux" >}}/stdlib/universe/pivot) +to pivot data tables by `rowKey`, `columnKey`, and `valueColumn` parameters. + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "cpu" and r.cpu == "cpu-total") + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") +``` + +### Histograms +Generating histograms isn't supported in InfluxQL. +Use the Flux [`histogram()` function](/{{< latest "flux" >}}/stdlib/universe/histogram) to +generate a cumulative histogram. + +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> histogram(buckets: [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) +``` + +_For more examples, see [how to create histograms with Flux](/influxdb/v2.5/query-data/flux/histograms/)._ + +### Covariance +Flux provides functions for simple covariance calculations. 
+Use the [`covariance()` function](/{{< latest "flux" >}}/stdlib/universe/covariance) +to calculate the covariance between two columns and the [`cov()` function](/{{< latest "flux" >}}/stdlib/universe/cov) +to calculate the covariance between two data streams. + +###### Covariance between two columns +```js +from(bucket: "example-bucket") + |> range(start:-5m) + |> covariance(columns: ["x", "y"]) +``` + +###### Covariance between two streams of data +```js +table1 = from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => r._measurement == "measurement_1") + +table2 = from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => r._measurement == "measurement_2") + +cov(x: table1, y: table2, on: ["_time", "_field"]) +``` + +### Cast booleans to integers +InfluxQL supports type casting for numeric data types (floats to integers and vice versa) only. +Use [Flux type conversion functions](/{{< latest "flux" >}}/function-types/#type-conversions) +to perform many more type conversions, including casting boolean values to integers. + +##### Cast boolean field values to integers +```js +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "m" and r._field == "bool_field") + |> toInt() +``` + +### String manipulation and data shaping +InfluxQL doesn't support string manipulation when querying data. +Use [Flux Strings package](/{{< latest "flux" >}}/stdlib/strings/) functions to operate on string data. +Combine functions in this package with the [`map()` function](/{{< latest "flux" >}}/stdlib/universe/map/) to perform operations like sanitizing and normalizing strings. + +```js +import "strings" + +from(bucket: "example-bucket") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "weather" and r._field == "temp") + |> map( + fn: (r) => ({ + r with + location: strings.toTitle(v: r.location), + sensor: strings.replaceAll(v: r.sensor, t: " ", u: "-"), + status: strings.substring(v: r.status, start: 0, end: 8) + }) + ) +``` + +### Work with geo-temporal data +InfluxQL doesn't support working with geo-temporal data. +The [Flux Geo package](/{{< latest "flux" >}}/stdlib/experimental/geo/) is a collection of functions that +let you shape, filter, and group geo-temporal data. + +```js +import "experimental/geo" + +from(bucket: "geo/autogen") + |> range(start: -1w) + |> filter(fn: (r) => r._measurement == "taxi") + |> geo.shapeData(latField: "latitude", lonField: "longitude", level: 20) + |> geo.filterRows(region: {lat: 40.69335938, lon: -73.30078125, radius: 20.0}, strict: true) + |> geo.asTracks(groupBy: ["fare-id"]) +``` + + +## InfluxQL and Flux parity +We're continuing to add functions to complete parity between Flux and InfluxQL. +The table below shows InfluxQL statements, clauses, and functions along with their equivalent Flux functions. 
+ +_For a complete list of Flux functions, [view all Flux functions](/{{< latest "flux" >}}/stdlib/all-functions/)._ + +| InfluxQL | Flux Functions | +| :------------------------------------------------------------------------------------------------------------------------------------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [SELECT](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-basic-select-statement) | [filter()](/{{< latest "flux" >}}/stdlib/universe/filter/) | +| [WHERE](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-where-clause) | [filter()](/{{< latest "flux" >}}/stdlib/universe/filter/), [range()](/{{< latest "flux" >}}/stdlib/universe/range/) | +| [GROUP BY](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-group-by-clause) | [group()](/{{< latest "flux" >}}/stdlib/universe/group/) | +| [INTO](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-into-clause) | [to()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/to/) | +| [ORDER BY](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#order-by-time-desc) | [sort()](/{{< latest "flux" >}}/stdlib/universe/sort/) | +| [LIMIT](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-limit-clause) | [limit()](/{{< latest "flux" >}}/stdlib/universe/limit/) | +| [SLIMIT](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-slimit-clause) | -- | +| [OFFSET](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-offset-clause) | -- | +| [SOFFSET](/{{< latest "influxdb" "v1" >}}/query_language/explore-data/#the-soffset-clause) | -- | +| [SHOW DATABASES](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-databases) | [buckets()](/{{< latest "flux" >}}/stdlib/universe/buckets/) | +| [SHOW MEASUREMENTS](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-measurements) | [schema.measurements](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurements) | +| [SHOW FIELD KEYS](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-field-keys) | [keys()](/{{< latest "flux" >}}/stdlib/universe/keys/) | +| [SHOW RETENTION POLICIES](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-retention-policies) | [buckets()](/{{< latest "flux" >}}/stdlib/universe/buckets/) | +| [SHOW TAG KEYS](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-tag-keys) | [schema.tagKeys()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/tagkeys), [schema.measurementTagKeys()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagkeys) | +| [SHOW TAG VALUES](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-tag-values) | [schema.tagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/tagvalues), [schema.measurementTagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagvalues) | +| [SHOW SERIES](/{{< latest "influxdb" "v1" >}}/query_language/explore-schema/#show-series) | -- | +| [CREATE DATABASE](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#create-database) | N/A | +| [DROP DATABASE](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-a-database-with-drop-database) | N/A | +| [DROP SERIES](/{{< latest "influxdb" "v1" 
>}}/query_language/manage-database/#drop-series-from-the-index-with-drop-serie) | N/A | +| [DELETE](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-series-with-delete) | N/A | +| [DROP MEASUREMENT](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-measurements-with-drop-measurement) | N/A | +| [DROP SHARD](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-a-shard-with-drop-shard) | N/A | +| [CREATE RETENTION POLICY](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#create-retention-policies-with-create-retention-policy) | N/A | +| [ALTER RETENTION POLICY](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#modify-retention-policies-with-alter-retention-policy) | N/A | +| [DROP RETENTION POLICY](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-retention-policies-with-drop-retention-policy) | N/A | +| [COUNT](/{{< latest "influxdb" "v1" >}}/query_language/functions#count) | [count()](/{{< latest "flux" >}}/stdlib/universe/count/) | +| [DISTINCT](/{{< latest "influxdb" "v1" >}}/query_language/functions#distinct) | [distinct()](/{{< latest "flux" >}}/stdlib/universe/distinct/) | +| [INTEGRAL](/{{< latest "influxdb" "v1" >}}/query_language/functions#integral) | [integral()](/{{< latest "flux" >}}/stdlib/universe/integral/) | +| [MEAN](/{{< latest "influxdb" "v1" >}}/query_language/functions#mean) | [mean()](/{{< latest "flux" >}}/stdlib/universe/mean/) | +| [MEDIAN](/{{< latest "influxdb" "v1" >}}/query_language/functions#median) | [median()](/{{< latest "flux" >}}/stdlib/universe/median/) | +| [MODE](/{{< latest "influxdb" "v1" >}}/query_language/functions#mode) | [mode()](/{{< latest "flux" >}}/stdlib/universe/mode/) | +| [SPREAD](/{{< latest "influxdb" "v1" >}}/query_language/functions#spread) | [spread()](/{{< latest "flux" >}}/stdlib/universe/spread/) | +| [STDDEV](/{{< latest "influxdb" "v1" >}}/query_language/functions#stddev) | [stddev()](/{{< latest "flux" >}}/stdlib/universe/stddev/) | +| [SUM](/{{< latest "influxdb" "v1" >}}/query_language/functions#sum) | [sum()](/{{< latest "flux" >}}/stdlib/universe/sum/) | +| [BOTTOM](/{{< latest "influxdb" "v1" >}}/query_language/functions#bottom) | [bottom()](/{{< latest "flux" >}}/stdlib/universe/bottom/) | +| [FIRST](/{{< latest "influxdb" "v1" >}}/query_language/functions#first) | [first()](/{{< latest "flux" >}}/stdlib/universe/first/) | +| [LAST](/{{< latest "influxdb" "v1" >}}/query_language/functions#last) | [last()](/{{< latest "flux" >}}/stdlib/universe/last/) | +| [MAX](/{{< latest "influxdb" "v1" >}}/query_language/functions#max) | [max()](/{{< latest "flux" >}}/stdlib/universe/max/) | +| [MIN](/{{< latest "influxdb" "v1" >}}/query_language/functions#min) | [min()](/{{< latest "flux" >}}/stdlib/universe/min/) | +| [PERCENTILE](/{{< latest "influxdb" "v1" >}}/query_language/functions#percentile) | [quantile()](/{{< latest "flux" >}}/stdlib/universe/quantile/) | +| [SAMPLE](/{{< latest "influxdb" "v1" >}}/query_language/functions#sample) | [sample()](/{{< latest "flux" >}}/stdlib/universe/sample/) | +| [TOP](/{{< latest "influxdb" "v1" >}}/query_language/functions#top) | [top()](/{{< latest "flux" >}}/stdlib/universe/top/) | +| [ABS](/{{< latest "influxdb" "v1" >}}/query_language/functions#abs) | [math.abs()](/{{< latest "flux" >}}/stdlib/math/abs/) | +| [ACOS](/{{< latest "influxdb" "v1" >}}/query_language/functions#acos) | [math.acos()](/{{< latest "flux" >}}/stdlib/math/acos/) | +| [ASIN](/{{< latest "influxdb" "v1" 
>}}/query_language/functions#asin) | [math.asin()](/{{< latest "flux" >}}/stdlib/math/asin/) | +| [ATAN](/{{< latest "influxdb" "v1" >}}/query_language/functions#atan) | [math.atan()](/{{< latest "flux" >}}/stdlib/math/atan/) | +| [ATAN2](/{{< latest "influxdb" "v1" >}}/query_language/functions#atan2) | [math.atan2()](/{{< latest "flux" >}}/stdlib/math/atan2/) | +| [CEIL](/{{< latest "influxdb" "v1" >}}/query_language/functions#ceil) | [math.ceil()](/{{< latest "flux" >}}/stdlib/math/ceil/) | +| [COS](/{{< latest "influxdb" "v1" >}}/query_language/functions#cos) | [math.cos()](/{{< latest "flux" >}}/stdlib/math/cos/) | +| [CUMULATIVE_SUM](/{{< latest "influxdb" "v1" >}}/query_language/functions#cumulative-sum) | [cumulativeSum()](/{{< latest "flux" >}}/stdlib/universe/cumulativesum/) | +| [DERIVATIVE](/{{< latest "influxdb" "v1" >}}/query_language/functions#derivative) | [derivative()](/{{< latest "flux" >}}/stdlib/universe/derivative/) | +| [DIFFERENCE](/{{< latest "influxdb" "v1" >}}/query_language/functions#difference) | [difference()](/{{< latest "flux" >}}/stdlib/universe/difference/) | +| [ELAPSED](/{{< latest "influxdb" "v1" >}}/query_language/functions#elapsed) | [elapsed()](/{{< latest "flux" >}}/stdlib/universe/elapsed/) | +| [EXP](/{{< latest "influxdb" "v1" >}}/query_language/functions#exp) | [math.exp()](/{{< latest "flux" >}}/stdlib/math/exp/) | +| [FLOOR](/{{< latest "influxdb" "v1" >}}/query_language/functions#floor) | [math.floor()](/{{< latest "flux" >}}/stdlib/math/floor/) | +| [HISTOGRAM](/{{< latest "influxdb" "v1" >}}/query_language/functions#histogram) | [histogram()](/{{< latest "flux" >}}/stdlib/universe/histogram/) | +| [LN](/{{< latest "influxdb" "v1" >}}/query_language/functions#ln) | [math.log()](/{{< latest "flux" >}}/stdlib/math/log/) | +| [LOG](/{{< latest "influxdb" "v1" >}}/query_language/functions#log) | [math.logb()](/{{< latest "flux" >}}/stdlib/math/logb/) | +| [LOG2](/{{< latest "influxdb" "v1" >}}/query_language/functions#log2) | [math.log2()](/{{< latest "flux" >}}/stdlib/math/log2/) | +| [LOG10](/{{< latest "influxdb" "v1" >}}/query_language/functions#logt10) | [math.log10()](/{{< latest "flux" >}}/stdlib/math/log10/) | +| [MOVING_AVERAGE](/{{< latest "influxdb" "v1" >}}/query_language/functions#moving-average) | [movingAverage()](/{{< latest "flux" >}}/stdlib/universe/movingaverage/) | +| [NON_NEGATIVE_DERIVATIVE](/{{< latest "influxdb" "v1" >}}/query_language/functions#non-negative-derivative) | [derivative(nonNegative:true)](/{{< latest "flux" >}}/stdlib/universe/derivative/) | +| [NON_NEGATIVE_DIFFERENCE](/{{< latest "influxdb" "v1" >}}/query_language/functions#non-negative-difference) | [difference(nonNegative:true)](/{{< latest "flux" >}}/stdlib/universe/derivative/) | +| [POW](/{{< latest "influxdb" "v1" >}}/query_language/functions#pow) | [math.pow()](/{{< latest "flux" >}}/stdlib/math/pow/) | +| [ROUND](/{{< latest "influxdb" "v1" >}}/query_language/functions#round) | [math.round()](/{{< latest "flux" >}}/stdlib/math/round/) | +| [SIN](/{{< latest "influxdb" "v1" >}}/query_language/functions#sin) | [math.sin()](/{{< latest "flux" >}}/stdlib/math/sin/) | +| [SQRT](/{{< latest "influxdb" "v1" >}}/query_language/functions#sqrt) | [math.sqrt()](/{{< latest "flux" >}}/stdlib/math/sqrt/) | +| [TAN](/{{< latest "influxdb" "v1" >}}/query_language/functions#tan) | [math.tan()](/{{< latest "flux" >}}/stdlib/math/tan/) | +| [HOLT_WINTERS](/{{< latest "influxdb" "v1" >}}/query_language/functions#holt-winters) | [holtWinters()](/{{< latest "flux" 
>}}/stdlib/universe/holtwinters/) | +| [CHANDE_MOMENTUM_OSCILLATOR](/{{< latest "influxdb" "v1" >}}/query_language/functions#chande-momentum-oscillator) | [chandeMomentumOscillator()](/{{< latest "flux" >}}/stdlib/universe/chandemomentumoscillator/) | +| [EXPONENTIAL_MOVING_AVERAGE](/{{< latest "influxdb" "v1" >}}/query_language/functions#exponential-moving-average) | [exponentialMovingAverage()](/{{< latest "flux" >}}/stdlib/universe/exponentialmovingaverage/) | +| [DOUBLE_EXPONENTIAL_MOVING_AVERAGE](/{{< latest "influxdb" "v1" >}}/query_language/functions#double-exponential-moving-average) | [doubleEMA()](/{{< latest "flux" >}}/stdlib/universe/doubleema/) | +| [KAUFMANS_EFFICIENCY_RATIO](/{{< latest "influxdb" "v1" >}}/query_language/functions#kaufmans-efficiency-ratio) | [kaufmansER()](/{{< latest "flux" >}}/stdlib/universe/kaufmanser/) | +| [KAUFMANS_ADAPTIVE_MOVING_AVERAGE](/{{< latest "influxdb" "v1" >}}/query_language/functions#kaufmans-adaptive-moving-average) | [kaufmansAMA()](/{{< latest "flux" >}}/stdlib/universe/kaufmansama/) | +| [TRIPLE_EXPONENTIAL_MOVING_AVERAGE](/{{< latest "influxdb" "v1" >}}/query_language/functions#triple-exponential-moving-average) | [tripleEMA()](/{{< latest "flux" >}}/stdlib/universe/tripleema/) | +| [TRIPLE_EXPONENTIAL_DERIVATIVE](/{{< latest "influxdb" "v1" >}}/query_language/functions#triple-exponential-derivative) | [tripleExponentialDerivative()](/{{< latest "flux" >}}/stdlib/universe/tripleexponentialderivative/) | +| [RELATIVE_STRENGTH_INDEX](/{{< latest "influxdb" "v1" >}}/query_language/functions#relative-strength-index) | [relativeStrengthIndex()](/{{< latest "flux" >}}/stdlib/universe/relativestrengthindex/) | diff --git a/content/influxdb/v2.5/reference/syntax/line-protocol.md b/content/influxdb/v2.5/reference/syntax/line-protocol.md new file mode 100644 index 000000000..0d8753aa3 --- /dev/null +++ b/content/influxdb/v2.5/reference/syntax/line-protocol.md @@ -0,0 +1,284 @@ +--- +title: Line protocol +description: > + InfluxDB uses line protocol to write data points. + It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. +menu: + influxdb_2_5_ref: + parent: Syntax +weight: 102 +influxdb/v2.5/tags: [write, line protocol, syntax] +aliases: + - /influxdb/v2.5/reference/line-protocol + - /influxdb/v2.5/write_protocols/line_protocol_tutorial/ + - /influxdb/v2.5/write_protocols/line_protocol_reference/ +related: + - /influxdb/v2.5/write-data/ +--- + +InfluxDB uses line protocol to write data points. +It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. + +- [Elements of line protocol](#elements-of-line-protocol) +- [Data types and format](#data-types-and-format) +- [Quotes](#quotes) +- [Special characters](#special-characters) +- [Comments](#comments) +- [Naming restrictions](#naming-restrictions) +- [Duplicate points](#duplicate-points) + +```js +// Syntax +[,=[,=]] =[,=] [] + +// Example +myMeasurement,tag1=value1,tag2=value2 fieldKey="fieldValue" 1556813561098000000 +``` + +Lines separated by the newline character `\n` represent a single point +in InfluxDB. Line protocol is whitespace sensitive. + +{{% note %}} +Line protocol does not support the newline character `\n` in tag or field values. 
+{{% /note %}} + +## Elements of line protocol + +``` +measurementName,tagKey=tagValue fieldKey="fieldValue" 1465839830100400200 +--------------- --------------- --------------------- ------------------- + | | | | + Measurement Tag set Field set Timestamp +``` + +### Measurement +({{< req >}}) +The measurement name. +InfluxDB accepts one measurement per point. +_Measurement names are case-sensitive and subject to [naming restrictions](#naming-restrictions)._ + +_**Data type:** [String](#string)_ + + +### Tag set +_**Optional**_ – +All tag key-value pairs for the point. +Key-value relationships are denoted with the `=` operand. +Multiple tag key-value pairs are comma-delimited. +_Tag keys and tag values are case-sensitive. +Tag keys are subject to [naming restrictions](#naming-restrictions)._ + +_**Key data type:** [String](#string)_ +_**Value data type:** [String](#string)_ + +### Field set +({{< req >}}) +All field key-value pairs for the point. +Points must have at least one field. +_Field keys and string values are case-sensitive. +Field keys are subject to [naming restrictions](#naming-restrictions)._ + +_**Key data type:** [String](#string)_ +_**Value data type:** [Float](#float) | [Integer](#integer) | [UInteger](#uinteger) | [String](#string) | [Boolean](#boolean)_ + +{{% note %}} +_Always double quote string field values. More on quotes [below](#quotes)._ + +```sh +measurementName fieldKey="field string value" 1556813561098000000 +``` +{{% /note %}} + +### Timestamp +_**Optional**_ – +The [unix timestamp](/influxdb/v2.5/reference/glossary/#unix-timestamp) for the data point. +InfluxDB accepts one timestamp per point. +If no timestamp is provided, InfluxDB uses the system time (UTC) of its host machine. + +_**Data type:** [Unix timestamp](#unix-timestamp)_ + +{{% note %}} +#### Important notes about timestamps +- To ensure a data point includes the time a metric is observed (not received by InfluxDB), + include the timestamp. +- If your timestamps are not in nanoseconds, specify the precision of your timestamps + when [writing the data to InfluxDB](/influxdb/v2.5/write-data/#timestamp-precision). +{{% /note %}} + +### Whitespace +Whitespace in line protocol determines how InfluxDB interprets the data point. +The **first unescaped space** delimits the measurement and the tag set from the field set. +The **second unescaped space** delimits the field set from the timestamp. + +``` +measurementName,tagKey=tagValue fieldKey="fieldValue" 1465839830100400200 + | | + 1st space 2nd space +``` + +## Data types and format + +### Float +IEEE-754 64-bit floating-point numbers. +Default numerical type. +_InfluxDB supports scientific notation in float field values._ + +##### Float field value examples +```js +myMeasurement fieldKey=1.0 +myMeasurement fieldKey=1 +myMeasurement fieldKey=-1.234456e+78 +``` + +### Integer +Signed 64-bit integers. +Trailing `i` on the number specifies an integer. + +| Minimum integer | Maximum integer | +| --------------- | --------------- | +| `-9223372036854775808i` | `9223372036854775807i` | + +##### Integer field value examples +```js +myMeasurement fieldKey=1i +myMeasurement fieldKey=12485903i +myMeasurement fieldKey=-12485903i +``` + +### UInteger +Unsigned 64-bit integers. +Trailing `u` on the number specifies an unsigned integer. 
+ +| Minimum uinteger | Maximum uinteger | +| ---------------- | ---------------- | +| `0u` | `18446744073709551615u` | + +##### UInteger field value examples +```js +myMeasurement fieldKey=1u +myMeasurement fieldKey=12485903u +``` + +### String +Plain text string. +Length limit 64KB. + +##### String example +```sh +# String measurement name, field key, and field value +myMeasurement fieldKey="this is a string" +``` + +### Boolean +Stores `true` or `false` values. + +| Boolean value | Accepted syntax | +|:-------------:|:--------------- | +| True | `t`, `T`, `true`, `True`, `TRUE` | +| False | `f`, `F`, `false`, `False`, `FALSE` | + +##### Boolean field value examples +```js +myMeasurement fieldKey=true +myMeasurement fieldKey=false +myMeasurement fieldKey=t +myMeasurement fieldKey=f +myMeasurement fieldKey=TRUE +myMeasurement fieldKey=FALSE +``` + +{{% note %}} +Do not quote boolean field values. +Quoted field values are interpreted as strings. +{{% /note %}} + +### Unix timestamp +Unix timestamp in a [specified precision](/influxdb/v2.5/reference/glossary/#unix-timestamp). +Default precision is nanoseconds (`ns`). + +| Minimum timestamp | Maximum timestamp | +| ----------------- | ----------------- | +| `-9223372036854775806` | `9223372036854775806` | + +##### Unix timestamp example +```js +myMeasurementName fieldKey="fieldValue" 1556813561098000000 +``` + +## Quotes +Line protocol supports single and double quotes as described in the following table: + +| Element | Double quotes | Single quotes | +| :------ | :------------: |:-------------: | +| Measurement | _Limited_ * | _Limited_ * | +| Tag key | _Limited_ * | _Limited_ * | +| Tag value | _Limited_ * | _Limited_ * | +| Field key | _Limited_ * | _Limited_ * | +| Field value | **Strings only** | Never | +| Timestamp | Never | Never | + +\* _Line protocol accepts double and single quotes in +measurement names, tag keys, tag values, and field keys, but interprets them as +part of the name, key, or value._ + +## Special Characters +Line protocol supports special characters in [string elements](#string). +In the following contexts, it requires escaping certain characters with a backslash (`\`): + +| Element | Escape characters | +|:------- |:----------------- | +| Measurement | Comma, Space | +| Tag key | Comma, Equals Sign, Space | +| Tag value | Comma, Equals Sign, Space | +| Field key | Comma, Equals Sign, Space | +| Field value | Double quote, Backslash | + +You do not need to escape other special characters. + +##### Examples of special characters in line protocol +```sh +# Measurement name with spaces +my\ Measurement fieldKey="string value" + +# Double quotes in a string field value +myMeasurement fieldKey="\"string\" within a string" + +# Tag keys and values with spaces +myMeasurement,tag\ Key1=tag\ Value1,tag\ Key2=tag\ Value2 fieldKey=100 + +# Emojis +myMeasurement,tagKey=🍭 fieldKey="Launch 🚀" 1556813561098000000 +``` + +### Escaping backslashes +Line protocol supports both literal backslashes and backslashes as an escape character. +With two contiguous backslashes, the first is interpreted as an escape character. +For example: + +| Backslashes | Interpreted as | +|:-----------:|:-------------:| +| `\` | `\` | +| `\\` | `\` | +| `\\\` | `\\` | +| `\\\\` | `\\` | +| `\\\\\` | `\\\` | +| `\\\\\\` | `\\\` | + +## Comments +Line protocol interprets `#` at the beginning of a line as a comment character +and ignores all subsequent characters until the next newline `\n`. 
+ +```sh +# This is a comment +myMeasurement fieldKey="string value" 1556813561098000000 +``` + +## Naming restrictions +Measurement names, tag keys, and field keys cannot begin with an underscore `_`. +The `_` namespace is reserved for InfluxDB system use. + +## Duplicate points +A point is uniquely identified by the measurement name, tag set, and timestamp. +If you submit line protocol with the same measurement, tag set, and timestamp, +but with a different field set, the field set becomes the union of the old +field set and the new field set, where any conflicts favor the new field set. diff --git a/content/influxdb/v2.5/reference/telegraf-plugins.md b/content/influxdb/v2.5/reference/telegraf-plugins.md new file mode 100644 index 000000000..f66a9b9c5 --- /dev/null +++ b/content/influxdb/v2.5/reference/telegraf-plugins.md @@ -0,0 +1,12 @@ +--- +title: Telegraf plugins +description: > + Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics. + It supports four categories of plugins including input, output, aggregator, and processor. + View and search all available Telegraf plugins. +--- + +Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics. +It supports four categories of plugins including input, output, aggregator, and processor. + +}}/plugins//">View Telegraf plugins diff --git a/content/influxdb/v2.5/reference/urls.md b/content/influxdb/v2.5/reference/urls.md new file mode 100644 index 000000000..b709bea3f --- /dev/null +++ b/content/influxdb/v2.5/reference/urls.md @@ -0,0 +1,45 @@ +--- +title: InfluxDB OSS URLs +description: > + InfluxDB OSS is accessed at `localhost:8086` by default, but you can also + customize your InfluxDB host and port. +weight: 6 +menu: + influxdb_2_5_ref: + name: InfluxDB URLs +--- + +InfluxDB OSS is accessed at `localhost:8086` by default, but you can also +customize your InfluxDB host and port. + +**Default host:** `localhost` +**Default port:** `8086` + +{{< keep-url >}} +``` +http://localhost:8086/ +``` + +## Customize your InfluxDB OSS URL +To customize your InfluxDB host and port, use the +[`http-bind-address` configuration option](/influxdb/v2.5/reference/config-options/#http-bind-address) +when starting `influxd`. + +{{< keep-url >}} +```sh +# Syntax +influxd --http-bind-address : + +# Example - Run InfluxDB at http://example.com:8080 +influxd --http-bind-address example.com:8080 + +# Example - Run InfluxDB at http://localhost:8080 +influxd --http-bind-address :8080 +``` + +{{% note %}} +#### Configure DNS routing +You must configure DNS routing to successfully route requests to your custom hostname. +Methods for configuring DNS routing vary depending on your operating system and +network architecture and are not covered in this documentation. +{{% /note %}} diff --git a/content/influxdb/v2.5/security/_index.md b/content/influxdb/v2.5/security/_index.md new file mode 100644 index 000000000..8bb8a9a4a --- /dev/null +++ b/content/influxdb/v2.5/security/_index.md @@ -0,0 +1,25 @@ +--- +title: Manage security and authorization +description: > + Security, access control, and sensitive secret handling are incredibly important + when handling any sort of sensitive data. + This section provides information about managing the security of your InfluxDB instance. 
+weight: 12 +menu: + influxdb_2_5: + name: Security & authorization +influxdb/v2.5/tags: [security, authentication] +--- + +Security, access control, and sensitive secret handling are incredibly important +when handling any sort of sensitive data. +This section provides information about managing the security of your InfluxDB instance. + +{{% note %}} +#### InfluxDB 2.x/1.x compatibility +If you [upgraded from 1.x to {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/), +use the [`influx v1 auth`](/influxdb/v2.5/reference/cli/influx/v1/auth/) commands +to manage authorizations for the [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). +{{% /note %}} + +{{< children >}} diff --git a/content/influxdb/v2.5/security/disable-devel.md b/content/influxdb/v2.5/security/disable-devel.md new file mode 100644 index 000000000..e6d810cd3 --- /dev/null +++ b/content/influxdb/v2.5/security/disable-devel.md @@ -0,0 +1,17 @@ +--- +title: Disable development features +seotitle: Disable development features in InfluxDB +description: > + Disable development features that may not be desirable in production. +weight: 105 +menu: + influxdb_2_5: + parent: Security & authorization +influxdb/v2.5/tags: [security, development] +--- + +By default, InfluxDB {{< current-version >}} enables useful functionality that exposes some level of information about your instance. Two of these are endpoints for observability of the health and activity of your instance. The third is the bundled UI. Depending on your site requirements, you may want to disable one or more of these when running InfluxDB in production. To disable, use the following configuration options: + +- [Disable /debug/pprof](/influxdb/v2.5/reference/config-options/#pprof-disabled). This endpoint provides runtime profiling data. +- [Disable /metrics](/influxdb/v2.5/reference/config-options/#metrics-disabled). This endpoint exposes [internal InfluxDB metrics](/influxdb/v2.2/reference/internals/metrics/). +- [Disable UI](/influxdb/v2.5/reference/config-options/#ui-disabled). The user interface for InfluxDB. diff --git a/content/influxdb/v2.5/security/enable-hardening.md b/content/influxdb/v2.5/security/enable-hardening.md new file mode 100644 index 000000000..ef1766225 --- /dev/null +++ b/content/influxdb/v2.5/security/enable-hardening.md @@ -0,0 +1,48 @@ +--- +title: Enable security features +seotitle: Enable security and hardening features in InfluxDB +description: > + Enable a collection of additional security and hardening features in InfluxDB OSS to better + secure your InfluxDB instance. +weight: 102 +menu: + influxdb_2_5: + parent: Security & authorization +influxdb/v2.5/tags: [security, hardening] +--- + +InfluxDB {{< current-version >}} provides optional security features that ensure your +InfluxDB instance is secure in whatever environment it's used in. + +To enable all [additional security features](#security-features), use the +[`hardening-enabled` configuration option](/influxdb/v2.5/reference/config-options/#hardening-enabled) +when starting InfluxDB. 
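+
+For example, a minimal sketch of starting `influxd` with hardening enabled, either with the configuration flag named above or, assuming the standard `INFLUXD_*` environment variable mapping for configuration options, the `INFLUXD_HARDENING_ENABLED` variable:
+
+```sh
+# Start InfluxDB with all optional hardening features enabled
+influxd --hardening-enabled
+
+# Or, assuming the standard environment variable mapping:
+export INFLUXD_HARDENING_ENABLED=true
+influxd
+```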
+ +## Security features + +- [Private IP Validation](#private-ip-validation) + +### Private IP Validation + +Some Flux functions ([`to()`](/flux/v0.x/stdlib/influxdata/influxdb/to/), +[`from()`](/flux/v0.x/stdlib/influxdata/influxdb/from/), [`http.post()`](/flux/v0.x/stdlib/http/post/), etc.), +[template fetching](/influxdb/v2.5/influxdb-templates/) and +[notification endpoints](influxdb/v2.5/monitor-alert/notification-endpoints/) +can require InfluxDB to make HTTP requests over the network. +With private IP validation enabled, InfluxDB first verifies that the IP address of the URL is not a private IP address. + +IP addresses are considered private if they fall into one of the following categories: + +- IPv4 loopback (`127.0.0.0/8`) +- RFC1918 (`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) +- RFC3927 (`169.254.0.0/16`) +- IPv6 loopback (`::1/128`) +- IPv6 link-local (`fe80::/10`) +- IPv6 unique local (`fc00::/7`) + +{{% note %}} +#### Private IP considerations +If your environment requires that these authenticated HTTP requests be made to private IP addresses, +omit the use of `--hardening-enabled` and +consider instead setting up egress firewalling to limit which hosts InfluxDB is allowed to connect. +{{% /note %}} diff --git a/content/influxdb/v2.5/security/enable-tls.md b/content/influxdb/v2.5/security/enable-tls.md new file mode 100644 index 000000000..ae400f5d2 --- /dev/null +++ b/content/influxdb/v2.5/security/enable-tls.md @@ -0,0 +1,154 @@ +--- +title: Enable TLS encryption +seotitle: Enable TLS/SSL encryption +description: > + Enable Transport Layer Security (TLS) and use the HTTPS protocol to secure communication between clients and InfluxDB. +weight: 101 +menu: + influxdb_2_5: + parent: Security & authorization +influxdb/v2.5/tags: [security, authentication, tls, https, ssl] +products: [oss] +--- + +Enabling TLS encrypts the communication between clients and the InfluxDB server. +When configured with a signed certificate, TLS also allows clients to verify the authenticity of the InfluxDB server. + +To set up TLS over HTTPS, do the following: + +- [Obtain requirements](#obtain-requirements) +- [Configure InfluxDB to use TLS](#configure-influxdb-to-use-tls) + +{{% warn %}} +InfluxData **strongly recommends** enabling HTTPS, especially if you plan on sending requests to InfluxDB over a network. +{{% /warn %}} + +## Obtain requirements + +To enable HTTPS with InfluxDB, you need a Transport Layer Security (TLS) certificate, also known as a Secured Sockets Layer (SSL) certificate. +InfluxDB supports three types of TLS certificates: + +* **Single domain certificates signed by a [Certificate Authority](https://en.wikipedia.org/wiki/Certificate_authority)** + + Single domain certificates provide cryptographic security to HTTPS requests and allow clients to verify the identity of the InfluxDB server. + These certificates are signed and issued by a trusted, third-party Certificate Authority (CA). + With this certificate option, every InfluxDB instance requires a unique single domain certificate. + +* **Wildcard certificates signed by a Certificate Authority** + + Wildcard certificates provide cryptographic security to HTTPS requests and allow clients to verify the identity of the InfluxDB server. + Wildcard certificates can be used across multiple InfluxDB instances on different servers. + +* **Self-signed certificates** + + Self-signed certificates are _not_ signed by a trusted, third-party CA. 
+ Unlike CA-signed certificates, self-signed certificates only provide cryptographic security to HTTPS requests. + They do not allow clients to verify the identity of the InfluxDB server. + With this certificate option, every InfluxDB instance requires a unique self-signed certificate. + You can generate a self-signed certificate on your own machine. + + + + +## Configure InfluxDB to use TLS + +1. **Download or generate certificate files** + + If using a certificate provided by a CA, follow their instructions to download the certificate files. + + If using a self-signed certificate, use the `openssl` utility to create a certificate. + + The following command generates a private key file (.key) and a self-signed certificate file (.crt) with required permissions + and saves them to `/etc/ssl/`. + (Other paths will also work.) + Files remain valid for the specified `NUMBER_OF_DAYS`. + + ```sh + sudo openssl req -x509 -nodes -newkey rsa:2048 \ + -keyout /etc/ssl/influxdb-selfsigned.key \ + -out /etc/ssl/influxdb-selfsigned.crt \ + -days + ``` + + The command will prompt you for more information. + You can choose to fill out these fields or leave them blank; both actions generate valid certificate files. + +2. **Set certificate file permissions** + + The user running InfluxDB must have read permissions on the TLS certificate. + + {{% note %}}You may opt to set up multiple users, groups, and permissions. + Ultimately, make sure all users running InfluxDB have read permissions for the TLS certificate. + {{% /note %}} + + Run the following command to give InfluxDB read and write permissions on the certificate files. + + ```bash + sudo chmod 644 /etc/ssl/ + sudo chmod 600 /etc/ssl/ + ``` + +3. **Run `influxd` with TLS flags** + + Start InfluxDB with TLS command line flags: + + ```bash + influxd \ + --tls-cert="" \ + --tls-key="" + ``` + +4. **Verify TLS connection** + + + Ensure you can connect over HTTPS by running + + ``` + curl -v https://localhost:8086/api/v2/ping + ``` + + If using a self-signed certificate, use the `-k` flag to skip certificate verification: + + ``` + curl -vk https://localhost:8086/api/v2/ping + ``` + + With this command, you should see output confirming a succussful TLS handshake. + +You can further configure TLS settings using +[`tls-min-version`](/influxdb/v2.5/reference/config-options/#tls-min-version) +and +[`tls-strict-ciphers`](/influxdb/v2.5/reference/config-options/#tls-strict-ciphers). + +## Connect Telegraf to a secured InfluxDB instance + +To connect [Telegraf](/{{< latest "telegraf" >}}/) to an InfluxDB {{< current-version >}} instance with TLS enabled, +update the following `influxdb_v2` output settings in your Telegraf configuration file: + +- Update URLs to use HTTPS instead of HTTP. +- If using a self-signed certificate, uncomment and set `insecure_skip_verify` to `true`. + +### Example configuration + +```toml +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Configuration for sending metrics to InfluxDB +[[outputs.influxdb_v2]] + ## The URLs of the InfluxDB cluster nodes. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + urls = ["https://127.0.0.1:8086"] + + [...] + + ## Optional TLS Config for use on HTTP connections. + [...] 
+ ## Use TLS but skip chain & host verification + insecure_skip_verify = true +``` + +Restart Telegraf using the updated configuration file. diff --git a/content/influxdb/v2.5/security/secrets/_index.md b/content/influxdb/v2.5/security/secrets/_index.md new file mode 100644 index 000000000..5b0b5e817 --- /dev/null +++ b/content/influxdb/v2.5/security/secrets/_index.md @@ -0,0 +1,24 @@ +--- +title: Manage secrets +description: Manage, use, and store secrets in InfluxDB. +influxdb/v2.5/tags: [secrets, security] +menu: + influxdb_2_5: + parent: Security & authorization +weight: 103 +aliases: + - /influxdb/v2.5/security/secrets/manage-secrets/ +--- + +Secrets are key-value pairs that contain sensitive information you want to control +access to, such as API keys, passwords, or certificates. +There are two options for storing secrets with InfluxDB: + +- By default, secrets are Base64-encoded and stored in the InfluxDB embedded key value store, + [BoltDB](https://github.com/boltdb/bolt). +- You can also set up a Vault server to store secrets. + For details, see [Store secrets in Vault](/influxdb/v2.5/security/secrets/use-vault). + +--- + +{{< children >}} diff --git a/content/influxdb/v2.5/security/secrets/add.md b/content/influxdb/v2.5/security/secrets/add.md new file mode 100644 index 000000000..1a27571bc --- /dev/null +++ b/content/influxdb/v2.5/security/secrets/add.md @@ -0,0 +1,58 @@ +--- +title: Add secrets +description: Add secrets using the `influx` CLI or the InfluxDB API. +influxdb/v2.5/tags: [secrets, security] +menu: + influxdb_2_5: + parent: Manage secrets +weight: 301 +aliases: + - /influxdb/v2.5/security/secrets/manage-secrets/add/ +--- + +Add secrets using the `influx` command line interface (CLI) or the InfluxDB API. + + +- [Add a secret using the influx CLI](#add-a-secret-using-the-influx-cli) +- [Add a secret using the InfluxDB API](#add-a-secret-using-the-influxdb-api) + +## Add a secret using the influx CLI +Use the [`influx secret update` command](/influxdb/v2.5/reference/cli/influx/secret/update/) +to add a new secret to your organization. +Provide the secret key with the `-k` or `--key` flag. +You may also provide the secret value with the `-v` or `--value` flag. +If you do not provide the secret value with the `-v` or `--value` flag, +enter the value when prompted. + +{{% warn %}} +Providing a secret value with the `-v` or `--value` flag may expose the secret +in your command history. +{{% /warn %}} + +```sh +# Syntax +influx secret update -k + +# Example +influx secret update -k foo +``` + +## Add a secret using the InfluxDB API +Use the `PATCH` request method and the `/orgs/{orgID}/secrets` API endpoint to +add a new secret to your organization. + +**Include the following:** + +- Your [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) in the request URL +- Your [API token](/influxdb/v2.5/security/tokens/view-tokens/) in the `Authorization` header +- The secret key-value pair in the request body + + +```sh +curl --request PATCH http://localhost:8086/api/v2/orgs//secrets \ + --header 'Authorization: Token YOURAUTHTOKEN' \ + --header 'Content-type: application/json' \ + --data '{ + "": "" +}' +``` diff --git a/content/influxdb/v2.5/security/secrets/delete.md b/content/influxdb/v2.5/security/secrets/delete.md new file mode 100644 index 000000000..a0766070c --- /dev/null +++ b/content/influxdb/v2.5/security/secrets/delete.md @@ -0,0 +1,50 @@ +--- +title: Delete secrets +description: Delete secrets using the `influx` CLI or the InfluxDB API. 
+influxdb/v2.5/tags: [secrets, security]
+menu:
+  influxdb_2_5:
+    parent: Manage secrets
+weight: 304
+aliases:
+  - /influxdb/v2.5/security/secrets/manage-secrets/delete/
+---
+
+Delete secrets using the `influx` command line interface (CLI) or the InfluxDB API.
+
+- [Delete a secret using the influx CLI](#delete-a-secret-using-the-influx-cli)
+- [Delete secrets using the InfluxDB API](#delete-secrets-using-the-influxdb-api)
+
+## Delete a secret using the influx CLI
+Use the [`influx secret delete` command](/influxdb/v2.5/reference/cli/influx/secret/delete/)
+to delete a secret key-value pair from your organization.
+Provide the secret key to delete with the `-k` or `--key` flag.
+
+```sh
+# Syntax
+influx secret delete -k
+
+# Example
+influx secret delete -k foo
+```
+
+## Delete secrets using the InfluxDB API
+Use the `POST` request method and the `orgs/{orgID}/secrets/delete` API endpoint
+to delete one or more secrets.
+
+**Include the following:**
+
+- Your [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) in the request URL
+- Your [API token](/influxdb/v2.5/security/tokens/view-tokens/) in the `Authorization` header
+- An array of secret keys to delete in the request body
+
+```bash
+curl --request POST http://localhost:8086/api/v2/orgs//secrets/delete \
+  --header 'Authorization: Token YOURAUTHTOKEN' \
+  --data '{
+  "secrets": [
+    ""
+  ]
+}'
+```
diff --git a/content/influxdb/v2.5/security/secrets/update.md b/content/influxdb/v2.5/security/secrets/update.md
new file mode 100644
index 000000000..8dcdc69a7
--- /dev/null
+++ b/content/influxdb/v2.5/security/secrets/update.md
@@ -0,0 +1,57 @@
+---
+title: Update secrets
+description: Update secrets using the `influx` CLI or the InfluxDB API.
+influxdb/v2.5/tags: [secrets, security]
+menu:
+  influxdb_2_5:
+    parent: Manage secrets
+weight: 303
+aliases:
+  - /influxdb/v2.5/security/secrets/manage-secrets/update/
+---
+
+Update secrets using the `influx` command line interface (CLI) or the InfluxDB API.
+
+- [Update a secret using the influx CLI](#update-a-secret-using-the-influx-cli)
+- [Update a secret using the InfluxDB API](#update-a-secret-using-the-influxdb-api)
+
+## Update a secret using the influx CLI
+Use the [`influx secret update` command](/influxdb/v2.5/reference/cli/influx/secret/update/)
+to update a secret in your organization.
+Provide the secret key to update with the `-k` or `--key` flag.
+You may also provide the secret value with the `-v` or `--value` flag.
+If you do not provide the secret value with the `-v` or `--value` flag,
+enter the value when prompted.
+
+{{% warn %}}
+Providing a secret value with the `-v` or `--value` flag may expose the secret
+in your command history.
+{{% /warn %}}
+
+```sh
+# Syntax
+influx secret update -k
+
+# Example
+influx secret update -k foo
+```
+
+## Update a secret using the InfluxDB API
+Use the `PATCH` request method and the InfluxDB `/orgs/{orgID}/secrets` API endpoint
+to update a secret in your organization.
+ +**Include the following:** + +- Your [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) in the request URL +- Your [API token](/influxdb/v2.5/security/tokens/view-tokens/) in the `Authorization` header +- The updated secret key-value pair in the request body + + +```sh +curl --request PATCH http://localhost:8086/api/v2/orgs//secrets \ + --header 'Authorization: Token YOURAUTHTOKEN' \ + --header 'Content-type: application/json' \ + --data '{ + "": "" +}' +``` diff --git a/content/influxdb/v2.5/security/secrets/use-vault.md b/content/influxdb/v2.5/security/secrets/use-vault.md new file mode 100644 index 000000000..bb24716a0 --- /dev/null +++ b/content/influxdb/v2.5/security/secrets/use-vault.md @@ -0,0 +1,98 @@ +--- +title: Store secrets in Vault +description: Use Vault as an InfluxDB secret store and manage secrets through the in InfluxDB API. +influxdb/v2.5/tags: [secrets, security] +menu: + influxdb_2_5: + parent: Manage secrets +weight: 306 +--- + +[Vault](https://www.vaultproject.io/) secures, stores, and controls access +to tokens, passwords, certificates, and other sensitive secrets. +Store sensitive secrets in Vault using InfluxDB's built-in Vault integration. + +To store secrets in Vault, complete the following steps: + +1. [Start a Vault server](#start-a-vault-server). +2. [Provide Vault server address and token](#provide-vault-server-address-and-token). +3. [Start InfluxDB](#start-influxdb). +4. [Manage secrets through the InfluxDB API](#manage-secrets-through-the-influxdb-api). + +## Start a Vault server + +Start a Vault server and ensure InfluxDB has network access to the server. + +The following links provide information about running Vault in both development and production: + +- [Install Vault](https://learn.hashicorp.com/vault/getting-started/install) +- [Start a Vault dev server](https://learn.hashicorp.com/vault/getting-started/dev-server) +- [Deploy Vault](https://learn.hashicorp.com/vault/getting-started/deploy) + +{{% note %}} +InfluxDB supports the [Vault KV Secrets Engine Version 2 API](https://www.vaultproject.io/api/secret/kv/kv-v2.html) only. +When you create a secrets engine, enable the `kv-v2` version by running: + +```js +vault secrets enable kv-v2 +``` +{{% /note %}} + +For this example, install Vault on your local machine and start a Vault dev server. + +```sh +vault server -dev +``` + +## Provide Vault server address and token + +Use `influxd` Vault-related tags or [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables) +to provide connection credentials and other important Vault-related information to InfluxDB. + +### Required credentials + +#### Vault address + +Provide the API address of your Vault server _(available in the Vault server output)_ +using the [`--vault-addr` flag](/influxdb/v2.5/reference/config-options/#vault-addr) when +starting `influxd` or with the `VAULT_ADDR` environment variable. + +#### Vault token + +Provide your [Vault token](https://learn.hashicorp.com/vault/getting-started/authentication) +(required to access your Vault server) using the [`--vault-token` flag](/influxdb/v2.5/reference/config-options/#vault-token) +when starting `influxd` or with the `VAULT_TOKEN` environment variable. + +_Your Vault server configuration may require other Vault settings._ + +## Start InfluxDB + +Start the [`influxd` service](/influxdb/v2.5/reference/cli/influxd/) with the `--secret-store` +option set to `vault` any other necessary flags. 
+ +```bash +influxd --secret-store vault \ + --vault-addr=http://127.0.0.1:8200 \ + --vault-token=s.0X0XxXXx0xXxXXxxxXxXxX0x +``` + +`influxd` includes the following Vault configuration options. +If set, these flags override any [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables): + +- `--vault-addr` +- `--vault-cacert` +- `--vault-capath` +- `--vault-client-cert` +- `--vault-client-key` +- `--vault-max-retries` +- `--vault-client-timeout` +- `--vault-skip-verify` +- `--vault-tls-server-name` +- `--vault-token` + +For more information, see [InfluxDB configuration options](/influxdb/v2.5/reference/config-options/). + +## Manage secrets through the InfluxDB API + +Use the InfluxDB `/org/{orgID}/secrets` API endpoint to add tokens to Vault. +For details, see [Manage secrets](/influxdb/v2.5/security/secrets/manage-secrets/). diff --git a/content/influxdb/v2.5/security/secrets/use.md b/content/influxdb/v2.5/security/secrets/use.md new file mode 100644 index 000000000..7c133d2ce --- /dev/null +++ b/content/influxdb/v2.5/security/secrets/use.md @@ -0,0 +1,29 @@ +--- +title: Use secrets +description: Use secrets in a query with Flux. +influxdb/v2.5/tags: [secrets, security] +menu: + influxdb_2_5: + parent: Manage secrets +weight: 305 +aliases: + - /influxdb/v2.5/security/secrets/manage-secrets/use/ +--- + +## Use secrets in a query +Import the `influxdata/influxd/secrets` package and use the `secrets.get()` function +to populate sensitive data in queries with secrets from your secret store. + +```js +import "influxdata/influxdb/secrets" +import "sql" + +username = secrets.get(key: "POSTGRES_USERNAME") +password = secrets.get(key: "POSTGRES_PASSWORD") + +sql.from( + driverName: "postgres", + dataSourceName: "postgresql://${username}:${password}@localhost", + query:"SELECT * FROM example-table", +) +``` diff --git a/content/influxdb/v2.5/security/secrets/view.md b/content/influxdb/v2.5/security/secrets/view.md new file mode 100644 index 000000000..ffc08ff33 --- /dev/null +++ b/content/influxdb/v2.5/security/secrets/view.md @@ -0,0 +1,39 @@ +--- +title: View secret keys +description: View secret keys using the `influx` CLI or the InfluxDB API. +influxdb/v2.5/tags: [secrets, security] +menu: + influxdb_2_5: + parent: Manage secrets +weight: 302 +aliases: + - /influxdb/v2.5/security/secrets/manage-secrets/view/ +--- + +View secret keys using the `influx` command line interface (CLI) or the InfluxDB API. + +- [View secret keys using the influx CLI](#view-secret-keys-using-the-influx-cli) +- [View secret keys using the InfluxDB API](#view-secret-keys-using-the-influxdb-api) + +## View secret keys using the influx CLI +Use the [`influx secret list` command](/influxdb/v2.5/reference/cli/influx/secret/list/) +to list your organization's secret keys. + +```sh +influx secret list +``` + +## View secret keys using the InfluxDB API +Use the `GET` request method and the InfluxDB `/orgs/{orgID}/secrets` API endpoint +to view your organization's secrets keys. 
+ +**Include the following:** + +- Your [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id) in the request URL +- Your [API token](/influxdb/v2.5/security/tokens/view-tokens/) in the `Authorization` header + + +```sh +curl --request GET http://localhost:8086/api/v2/orgs//secrets \ + --header 'Authorization: Token YOURAUTHTOKEN' +``` diff --git a/content/influxdb/v2.5/security/tokens/_index.md b/content/influxdb/v2.5/security/tokens/_index.md new file mode 100644 index 000000000..e68726abf --- /dev/null +++ b/content/influxdb/v2.5/security/tokens/_index.md @@ -0,0 +1,47 @@ +--- +title: Manage API tokens +seotitle: Manage API tokens in InfluxDB +description: Manage API tokens in InfluxDB using the InfluxDB UI or the influx CLI. +aliases: + - /influxdb/v2.5/users/tokens +influxdb/v2.5/tags: [tokens, authentication, security] +menu: + influxdb_2_5: + name: Manage tokens + parent: Security & authorization +weight: 104 +--- + +InfluxDB **API tokens** ensure secure interaction between InfluxDB and external tools such as clients or applications. +An API token belongs to a specific user and identifies InfluxDB permissions within the user's organization. + +Learn how to create, view, update, or delete an API token. + +## API token types + +- [Operator API token](#operator-token) +- [All-Access API token](#all-access-token) +- [Read/Write token](#readwrite-token) + +#### Operator token +Grants full read and write access to **all organizations and all organization resources in InfluxDB OSS 2.x**. +Some operations, e.g. [retrieving the server configuration](/influxdb/v2.5/reference/config-options/), require operator permissions. +Operator tokens are created in the InfluxDB setup process. +To [create an operator token manually](/influxdb/v2.5/security/tokens/create-token/) with the InfluxDB UI, `api/v2` API, or `influx` CLI after the setup process is completed, you must use an existing [Operator token](/influxdb/v2.5/security/tokens/#operator-token). + +To create a new Operator token without using an existing one, see how to use the [`influxd recovery auth`](/influxdb/v2.5/reference/cli/influxd/recovery/auth/) CLI. + +{{% note %}} +Because Operator tokens have full read and write access to all organizations in the database, +we recommend [creating an All-Access token](/influxdb/v2.5/security/tokens/create-token/) +for each organization and using those to manage InfluxDB. +This helps to prevent accidental interactions across organizations. +{{% /note %}} + +#### All-Access token +Grants full read and write access to all resources in an organization. + +#### Read/Write token +Grants read access, write access, or both to specific buckets in an organization. + +{{< children hlevel="h2" >}} diff --git a/content/influxdb/v2.5/security/tokens/create-token.md b/content/influxdb/v2.5/security/tokens/create-token.md new file mode 100644 index 000000000..df8ac1b49 --- /dev/null +++ b/content/influxdb/v2.5/security/tokens/create-token.md @@ -0,0 +1,251 @@ +--- +title: Create a token +seotitle: Create an API token in InfluxDB +description: Create an API token in InfluxDB using the InfluxDB UI, the `influx` CLI, or the InfluxDB API. +aliases: + - /influxdb/v2.5/users/tokens/create-token/ +menu: + influxdb_2_5: + name: Create a token + parent: Manage tokens +weight: 201 +--- + +Create API tokens using the InfluxDB user interface (UI), the `influx` +command line interface (CLI), or the InfluxDB API. 
+ +{{% note %}} + +To follow best practices for secure API token generation and retrieval, InfluxDB enforces access restrictions on API tokens. + +- Tokens are visible to the user who created the token. +- InfluxDB only allows access to the API token value immediately after the token is created. +- You can't change access (**read/write**) permissions for an API token after it's created. +- Tokens stop working when the user who created the token is deleted. + +**We recommend the following for managing your tokens:** + +- Create a generic user to create and manage tokens for writing data. +- Store your tokens in a secure password vault for future access. + +{{% /note %}} + +- [Manage tokens in the InfluxDB UI](#manage-tokens-in-the-influxdb-ui) +- [Create a token in the InfluxDB UI](#create-a-token-in-the-influxdb-ui) +- [Create a token using the influx CLI](#create-a-token-using-the-influx-cli) +- [Create a token using the InfluxDB API](#create-a-token-using-the-influxdb-api) + +## Manage tokens in the InfluxDB UI + +To manage InfluxDB API Tokens in the InfluxDB UI, navigate to the **API Tokens** management page. + +{{% oss-only %}} + +In the navigation menu on the left, select **Data (Load Data)** > **Tokens**. + +{{% /oss-only %}} + +{{% cloud-only %}} + +In the navigation menu on the left, select **Load Data** > **API Tokens**. + +{{% /cloud-only %}} + +{{< nav-icon "load-data" >}} + +## Create a token in the InfluxDB UI + +{{% oss-only %}} + +1. From the [API Tokens management page](#manage-tokens-in-the-influxdb-ui), +click **{{< icon "plus" >}} Generate** and select a token type + (**Read/Write Token** or **All Access API Token**). +2. In the window that appears, enter a description for your token in the **Description** field. +3. If generating a **read/write token**: + - Search for and select buckets to read from in the **Read** pane. + - Search for and select buckets to write to in the **Write** pane. +4. Click **Save**. + +{{% /oss-only %}} + + +{{% cloud-only %}} + +### Create an all-access token + +1. From the [API Tokens management page](#manage-tokens-in-the-influxdb-ui), +click the **{{< icon "plus" >}} {{< caps >}}Generate API Token{{< /caps >}}** button. +2. Select **All Access API Token**. + +### Create a custom token + +1. From the [API Tokens management page](#manage-tokens-in-the-influxdb-ui), +click the **{{< icon "plus" >}} {{< caps >}}Generate API Token{{< /caps >}}** button. +2. Select **Custom API Token**. +3. When the **Generate a Personal API Token** window appears, enter a description. If you don't provide a description for the token, InfluxDB will generate a description from the permissions you assign. + For example, if you select **Read** for a bucket named "\_monitoring" and **Write** for a bucket named "\_tasks", InfluxDB will generate the description "Read buckets \_monitoring Write buckets \_tasks". +4. Select the check boxes in the **Read** and **Write** columns to assign access permissions for the token. You can enable access to all buckets, individual buckets, Telegraf configurations, and other InfluxDB resources. By default, the new token has no access permissions. +5. When you're finished, click **{{< caps >}}Generate{{< /caps >}}**. +6. When InfluxDB displays the token value, click **{{< caps >}}Copy to Clipboard{{< /caps >}}**. This is your only chance to access and copy the token value from InfluxDB. +7. (Optional) Store the API token value in a secure password vault. 
+ +### Clone a token + +To create a token with the same authorizations as an existing token, clone the existing token. + +1. From the [API Tokens management page](#manage-tokens-in-the-influxdb-ui), +find the token you want to clone and click the **{{< icon "settings" >}}** icon located far right of the token description. +3. Select **Clone**. +3. When InfluxDB UI displays the created token, click **{{< caps >}}Copy to Clipboard{{< /caps >}}**. This is your only chance to access and copy the token value from InfluxDB. +4. (Optional) Store the API token value in a secure password vault. + +{{% /cloud-only %}} + +## Create a token using the influx CLI + +{{% warn %}} +InfluxDB 2.4 introduced a bug that prevents you from creating an **all-access** or **operator** token using the `influx auth create` command, and causes the following error: `Error: could not write auth with provided arguments: 403 Forbidden: permission.` + +Until this bug is resolved in the next influx CLI release, please use the [workaround below to create an all-access or operator token](/influxdb/v2.5/security/tokens/create-token/#workaround-to-create-an-all-access-or-operator-token). +{{% /warn %}} + +### **Workaround:** To create an all-access or operator token + +- Use the following command to create an [all-access](/influxdb/v2.5/security/tokens/#all-access-token) or [operator](/influxdb/v2.5/security/tokens/#operator-token) token. For an operator token, you must also include the `--read-orgs` and `--write-orgs` flags. + +```sh +influx auth create + --org-id or --org \ + --read-authorizations \ + --write-authorizations \ + --read-buckets \ + --write-buckets \ + --read-dashboards \ + --write-dashboards \ + --read-tasks \ + --write-tasks \ + --read-telegrafs \ + --write-telegrafs \ + --read-users \ + --write-users \ + --read-variables \ + --write-variables \ + --read-secrets \ + --write-secrets \ + --read-labels \ + --write-labels \ + --read-views \ + --write-views \ + --read-documents \ + --write-documents \ + --read-notificationRules \ + --write-notificationRules \ + --read-notificationEndpoints \ + --write-notificationEndpoints \ + --read-checks \ + --write-checks \ + --read-dbrp \ + --write-dbrp \ + --read-annotations \ + --write-annotations \ + --read-sources \ + --write-sources \ + --read-scrapers \ + --write-scrapers \ + --read-notebooks \ + --write-notebooks \ + --read-remotes \ + --write-remotes \ + --read-replications \ + --write-replications +``` + + \ No newline at end of file diff --git a/content/influxdb/v2.5/security/tokens/delete-token.md b/content/influxdb/v2.5/security/tokens/delete-token.md new file mode 100644 index 000000000..795984cd6 --- /dev/null +++ b/content/influxdb/v2.5/security/tokens/delete-token.md @@ -0,0 +1,87 @@ +--- +title: Delete a token +seotitle: Delete an API token from InfluxDB +description: Delete an API token from InfluxDB using the InfluxDB UI or the `influx` CLI. +aliases: + - /influxdb/v2.5/users/tokens/delete-token +menu: + influxdb_2_5: + name: Delete a token + parent: Manage tokens +weight: 204 +--- + +Delete API tokens from the InfluxDB user interface (UI) or the `influx` command line interface (CLI). +Once deleted, all users and external integrations using the API token will no longer +have access to your InfluxDB instance. 
+ +- [Delete tokens in the InfluxDB UI](#delete-tokens-in-the-influxdb-ui) +- [Delete a token using the influx CLI](#delete-a-token-using-the-influx-cli) +- [Delete a token using the InfluxDB API](#delete-a-token-using-the-influxdb-api) + +## Delete tokens in the InfluxDB UI + +{{% oss-only %}} + +1. In the navigation menu on the left, select **Data (Load Data)** > **Tokens**. + +{{< nav-icon "load-data" >}} + +2. Hover over the token you want to delete. +3. Click the **{{< icon "delete" >}}** icon located far right of the token description. +3. Click **Delete** to delete the token. + +{{% /oss-only %}} + +{{% cloud-only %}} + +1. In the navigation menu on the left, select **Load Data** > **API Tokens**. + +{{< nav-icon "data" >}} + +2. Find the token that you would like to delete. +3. Click the **{{< icon "delete" >}}** icon located far right of the token description. +4. Click **{{< caps >}}Confirm{{< /caps >}}** to delete the token. + +{{% /cloud-only %}} + +## Delete a token using the influx CLI + +Use the [`influx auth delete` command](/influxdb/v2.5/reference/cli/influx/auth/delete) +to delete a token. + +_This command requires an auth ID, which is available in the output of `influx auth find`._ + +```sh +# Syntax +influx auth delete -i + +# Example +influx auth delete -i 03a2bee5a9c9a000 +``` + +## Delete a token using the InfluxDB API + +Use the `/api/v2/authorizations` InfluxDB API endpoint to delete a token. + +[{{< api-endpoint method="DELETE" endpoint="http://localhost:8086/api/v2/authorizations/AUTH_ID" >}}](/influxdb/v2.5/api/#operation/DeleteAuthorizationsID) + +Include the following in your request: + +| Requirement | Include by | +|:----------- |:---------- | +| API token with the [`write: authorizations`](/influxdb/v2.5/api/#operation/PostAuthorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | +| Authorization ID | URL path parameter. | + +```sh +# Delete the first authorization listed for the user. +curl --request GET \ + "http://localhost:8086/api/v2/authorizations?user=user2" \ + --header "Authorization: Token ${INFLUX_OP_TOKEN}" \ + --header 'Content-type: application/json' \ +| jq .authorizations[0].id \ +| xargs -I authid curl --request DELETE \ + http://localhost:8086/api/v2/authorizations/authid \ + --header "Authorization: Token ${INFLUX_OP_TOKEN}" \ + --header 'Content-type: application/json' +``` diff --git a/content/influxdb/v2.5/security/tokens/update-tokens.md b/content/influxdb/v2.5/security/tokens/update-tokens.md new file mode 100644 index 000000000..51051c7ca --- /dev/null +++ b/content/influxdb/v2.5/security/tokens/update-tokens.md @@ -0,0 +1,129 @@ +--- +title: Update a token +seotitle: Update API tokens in InfluxDB +description: Update API tokens' descriptions in InfluxDB using the InfluxDB UI. +aliases: + - /influxdb/v2.5/users/tokens/update-tokens +menu: + influxdb_2_5: + name: Update a token + parent: Manage tokens +weight: 203 +--- + +Update an API token's description and status. +using the InfluxDB user interface (UI). + +- [Update a token in the InfluxDB UI](#update-a-token-in-the-influxdb-ui) +- [Enable or disable a token in the InfluxDB UI](#enable-or-disable-a-token-in-the-influxdb-ui) +- [Enable a token using the influx CLI](#enable-a-token-using-the-influx-cli) +- [Disable a token using the influx CLI](#disable-a-token-using-the-influx-cli) +- [Update a token using the InfluxDB API](#update-a-token-using-the-influxdb-api) + +## Update a token in the InfluxDB UI + +1. 
In the navigation menu on the left, select **Data (Load Data)** > **Tokens**. + +{{< nav-icon "load-data" >}} + +2. Click the pencil icon {{< icon "pencil" >}} next to the token's name in the **Description** column. +3. Update the token description, then click anywhere else to save. + +## Enable or disable a token in the InfluxDB UI + +{{% oss-only %}} + +1. In the navigation menu on the left, select **Data (Load Data)** > **Tokens**. + +{{< nav-icon "load-data" >}} + +2. Click the **{{< icon "toggle-green" >}} Status** toggle. + +{{% /oss-only %}} + +{{% cloud-only %}} + +1. In the navigation menu on the left, select **Load Data** > **API Tokens**. + + {{< nav-icon "data" >}} + +2. Find the token that you would like to enable or disable. +3. Click the token description. +4. Click the **{{< icon "toggle-blue" >}} Status** toggle. + +{{% /cloud-only %}} + +## Enable a token using the influx CLI + +Use the [`influx auth active` command](/influxdb/v2.5/reference/cli/influx/auth/active) +to activate a token. + +_This command requires an authorization ID, which is available in the output of `influx auth find`._ + +```sh +# Syntax +influx auth active -i + +# Example +influx auth active -i 0804f74142bbf000 +``` +To get the current status of a token, use the JSON output of the [`influx auth list` command](/influxdb/v2.5/reference/cli/influx/auth/list). + +```sh +influx auth find --json +``` + +### Disable a token using the influx CLI + +Use the [`influx auth inactive` command](/influxdb/v2.5/reference/cli/influx/auth/active) +to deactivate a token. + +_This command requires an authorization ID, which is available in the output of `influx auth find`._ + +```sh +# Syntax +influx auth inactive -i + +# Example +influx auth inactive -i 0804f74142bbf000 +``` + +To get the current status of a token, use the JSON output of the [`influx auth list` command](/influxdb/v2.5/reference/cli/influx/auth/list). + +```sh +influx auth find --json +``` + +## Update a token using the InfluxDB API + +Use the `/api/v2/authorizations` InfluxDB API endpoint to update the description and status of a token. + +[{{< api-endpoint method="PATCH" endpoint="http://localhost:8086/api/v2/authorizations/AUTH_ID" >}}](/influxdb/v2.5/api/#operation/PatchAuthorizationsID) + +Include the following in your request: + +| Requirement | Include by | +|:----------- |:---------- | +| API token with the [`write: authorizations`](/influxdb/v2.5/api/#operation/PostAuthorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | +| Authorization ID | URL path parameter. | +| Description and/or Status | Pass as `description`, `status` in the request body. | + +### Disable a token + +```sh +# Update the description and status of the first authorization listed for the user. + +curl --request GET \ + "http://localhost:8086/api/v2/authorizations?user=user2" \ + --header "Authorization: Token ${INFLUX_TOKEN}" \ + --header 'Content-type: application/json' \ +| jq .authorizations[0].id \ +| xargs -I authid curl --request PATCH \ + http://localhost:8086/api/v2/authorizations/authid \ + --header "Authorization: Token ${INFLUX_TOKEN}" \ + --header 'Content-type: application/json' \ + --data '{ + "description": "deactivated_auth", + "status": "inactive" + }' | jq . 
+``` diff --git a/content/influxdb/v2.5/security/tokens/use-tokens.md b/content/influxdb/v2.5/security/tokens/use-tokens.md new file mode 100644 index 000000000..23aa30b17 --- /dev/null +++ b/content/influxdb/v2.5/security/tokens/use-tokens.md @@ -0,0 +1,44 @@ +--- +title: Use tokens +seotitle: Use an API token in InfluxDB +description: Use an API token in the InfluxDB UI, the `influx` CLI, or the InfluxDB API. +aliases: + - /influxdb/v2.5/users/tokens/use-tokens +menu: + influxdb_2_5: + name: Use tokens + parent: Manage tokens +weight: 204 +--- + +Use tokens to authenticate requests to InfluxDB, including requests to write, query, and manage data and resources. +Authenticate requests using the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/), API requests made with client libraries, and tools like `curl`. + +### Add a token to a CLI request + +```sh +influx write -t -b BUCKET -o org-name +``` + +``` +export INFLUX_TOKEN=my-token +influx write -t $INFLUX_TOKEN -b my-bucket -o my-org "measurement field=1" +``` + +{{% note %}} +See [here](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#configure-your-token-as-an-environment-variable) +to configure environment variables on Windows. +(Click on the **Windows** tab.) +{{% /note %}} + +### Use CLI configurations + +Automatically manage and use tokens from the CLI using [`influx config`](/influxdb/v2.5/reference/cli/influx/config/). + +### Use a token in an API request + +Use tokens in [API requests](/influxdb/v2.5/api-guide/api_intro/#authentication). + +### Use a token in Postman + +Make authenticated requests with tokens [using Postman](/influxdb/v2.5/api-guide/postman/). diff --git a/content/influxdb/v2.5/security/tokens/view-tokens.md b/content/influxdb/v2.5/security/tokens/view-tokens.md new file mode 100644 index 000000000..0c2a36b8c --- /dev/null +++ b/content/influxdb/v2.5/security/tokens/view-tokens.md @@ -0,0 +1,110 @@ +--- +title: View tokens +seotitle: View API tokens in InfluxDB +description: View API tokens in InfluxDB using the InfluxDB UI, the `influx` CLI, or the InfluxDB API. +aliases: + - /influxdb/v2.5/users/tokens/view-tokens +menu: + influxdb_2_5: + name: View tokens + parent: Manage tokens +weight: 202 +--- + +View API tokens and permissions using the InfluxDB user interface (UI), +the `influx` command line interface (CLI), or the InfluxDB API. + +{{% note %}} + +{{% oss-only %}}Tokens are visible to the user who created the token. Users who own a token with Operator permissions also have access to all tokens. +Tokens stop working when the user who created the token is deleted. + +**We recommend creating a generic user to create and manage tokens for writing data.** +{{% /oss-only %}} + +{{% cloud-only %}} +To follow best practices for secure API token generation and retrieval, InfluxDB Cloud enforces access restrictions on API tokens. + - InfluxDB Cloud UI only allows access to the API token value immediately after the token is created. + - You can't change access (**read/write**) permissions for an API token after it's created. + - Tokens stop working when the user who created the token is deleted. + +We recommend the following for managing your tokens: +- Create a generic user to create and manage tokens for writing data. +- Store your tokens in a secure password vault for future access. +{{% /cloud-only %}} +{{% /note %}} + +## View tokens in the InfluxDB UI + +{{% oss-only %}} + +1. In the navigation menu on the left, select **Data (Load Data)** > **API Tokens**. + +{{< nav-icon "load-data" >}} + +2. 
Click a token name in the list to view the token and a summary of access permissions. + +{{% /oss-only %}} + +{{% cloud-only %}} + +1. In the navigation menu on the left, select **Load Data** > **API Tokens**. + +{{< nav-icon "load-data" >}} + +2. Click a token description in the list to view the token status and a list of access permissions. + +{{% /cloud-only %}} + +## View tokens using the influx CLI + +Use the [`influx auth list` command](/influxdb/v2.5/reference/cli/influx/auth/list) +to view tokens. + +```sh +influx auth list +``` + +Filtering options such as filtering by authorization ID, username, or user ID are available. +See the [`influx auth list` documentation](/influxdb/v2.5/reference/cli/influx/auth/list) +for information about other available flags. + +## View tokens using the InfluxDB API + +Use the `/api/v2/authorizations` InfluxDB API endpoint to view tokens and permissions. + +[{{< api-endpoint method="GET" endpoint="/api/v2/authorizations" >}}](/influxdb/cloud/api/#operation/GetAuthorizations) + +Include the following in your request: + +| Requirement | Include by | +|:----------- |:---------- | +| API token with the [`read: authorizations`](/influxdb/v2.5/api/#operation/PostAuthorizations) permission | Use the `Authorization: Token YOUR_API_TOKEN` header. | + +```sh +{{% get-shared-text "api/v2.0/auth/oss/tokens-view.sh" %}} +``` + +### View a single token + +To view a specific authorization and token, include the authorization ID in the URL path. + +{{% api-endpoint method="GET" endpoint="/api/v2/authorizations/{authID}" %}} + +### Filter the token list + +InfluxDB returns authorizations from the same organization as the token used in the request. +To filter tokens by user, include `userID` as a query parameter in your request. + +```sh +{{% get-shared-text "api/v2.0/auth/oss/tokens-view-filter.sh" %}} +``` + +{{% oss-only %}} + +[***Operator tokens***](/{{% latest "influxdb" %}}/security/tokens/#operator-token) have access to all organizations' authorizations. +To filter authorizations by organization when using an operator token, include an `org` or `orgID` query parameter in your request. + +{{% /oss-only %}} + +See the [`/authorizations` endpoint documentation](/influxdb/v2.5/api/#tag/Authorizations) for more information about available parameters. diff --git a/content/influxdb/v2.5/tags/_index.md b/content/influxdb/v2.5/tags/_index.md new file mode 100644 index 000000000..3a3d5c828 --- /dev/null +++ b/content/influxdb/v2.5/tags/_index.md @@ -0,0 +1,4 @@ +--- +title: Tags and related content +layout: tags-landing +--- diff --git a/content/influxdb/v2.5/telegraf-configs/_index.md b/content/influxdb/v2.5/telegraf-configs/_index.md new file mode 100644 index 000000000..b374ee3cf --- /dev/null +++ b/content/influxdb/v2.5/telegraf-configs/_index.md @@ -0,0 +1,37 @@ +--- +title: Telegraf configurations +description: > + InfluxDB OSS lets you automatically generate Telegraf configurations or upload custom + Telegraf configurations that collect metrics and write them to InfluxDB OSS. +weight: 12 +menu: influxdb_2_5 +influxdb/v2.5/tags: [telegraf] +related: + - /influxdb/v2.5/write-data/no-code/use-telegraf/manual-config/ + - /influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/ +--- + +InfluxDB lets you automatically generate Telegraf configurations or upload custom +Telegraf configurations that collect metrics and write them to InfluxDB. +Telegraf retrieves configurations from InfluxDB on startup. 
+ +## Use InfluxDB Telegraf configurations +With a Telegraf configuration stored in InfluxDB, the `telegraf` agent can retrieve +the configuration from an InfluxDB HTTP(S) endpoint. + +- Ensure Telegraf has network access to InfluxDB (OSS or Cloud). +- Start the `telegraf` agent using the `--config` flag to provide the URL of the + InfluxDB Telegraf configuration. For example: + + ```sh + telegraf --config http://localhost:8086/api/v2/telegrafs/ + ``` + +{{% note %}} +_[Setup instructions](/influxdb/v2.5/telegraf-configs/view/#view-setup-instructions) for +each Telegraf configuration are provided in the InfluxDB UI._ +{{% /note %}} + +## Manage Telegraf configurations + +{{< children >}} diff --git a/content/influxdb/v2.5/telegraf-configs/clone.md b/content/influxdb/v2.5/telegraf-configs/clone.md new file mode 100644 index 000000000..02e17b952 --- /dev/null +++ b/content/influxdb/v2.5/telegraf-configs/clone.md @@ -0,0 +1,21 @@ +--- +title: Clone a Telegraf configuration +description: > + Use the InfluxDB UI to clone an Telegraf configuration. +weight: 101 +menu: + influxdb_2_5: + name: Clone a config + parent: Telegraf configurations +--- + +Use the InfluxDB user interface (UI) to clone a Telegraf configuration. + + +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + + {{< nav-icon "load data" >}} + +2. Hover over the configuration you want to clone, click the **{{< icon "clone" >}}** + icon, and then click **Clone**. + The clone appears in your list of Telegraf configurations. diff --git a/content/influxdb/v2.5/telegraf-configs/create.md b/content/influxdb/v2.5/telegraf-configs/create.md new file mode 100644 index 000000000..39d11d2b8 --- /dev/null +++ b/content/influxdb/v2.5/telegraf-configs/create.md @@ -0,0 +1,72 @@ +--- +title: Create a Telegraf configuration +description: > + Use the InfluxDB UI or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) + to create an Telegraf configuration. +weight: 101 +menu: + influxdb_2_5: + name: Create a config + parent: Telegraf configurations +related: + - /influxdb/v2.5/write-data/no-code/use-telegraf/manual-config/ + - /influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/ + - /influxdb/v2.5/telegraf-configs/update/ +--- +Telegraf has an extensive list of plugins for many different technologies and use cases. +Not all plugins are available through the InfluxDB UI, but you can +[create and upload custom Telegraf configurations](#create-a-custom-telegraf-configuration) +to include any of the available [Telegraf plugins](/{{< latest "telegraf" >}}/plugins/). + +Use the InfluxDB user interface (UI) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) +to create a Telegraf configuration. + +To create a Telegraf configuration, do one of the following: + +- [Use the InfluxDB UI](#use-the-influxdb-ui) +- [Use the `influx` CLI](#use-the-influx-cli) +- [Create a custom Telegraf configuration](#create-a-custom-telegraf-configuration) + +## Use the InfluxDB UI +Use the InfluxDB UI to automatically generate and store new Telegraf configurations in InfluxDB. +Creating the configuration in the UI lets you select from a list of available technologies and generates a Telegraf configuration to collect metrics from those technologies. +For more information, see [Automatically configure Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/). 
+ +## Use the influx CLI +Use the [`influx telegrafs create` command](/influxdb/v2.5/reference/cli/influx/telegrafs/create/) +to upload a Telegraf configuration file from your local machine and create a new Telegraf +configuration in InfluxDB. + +Provide the following: + +- **Telegraf configuration name** +- **Telegraf configuration description** +- **Telegraf configuration file** + +{{% note %}} +If a **name** and **description** are not provided, they are set to empty strings. +{{% /note %}} + + +```sh +# Syntax +influx telegrafs create \ + -n \ + -d \ + -f /path/to/telegraf.conf + +# Example +influx telegrafs create \ + -n "Example Telegraf config" + -d "This is a description for an example Telegraf configuration." + -f /path/to/telegraf.conf +``` + +## Create a custom Telegraf configuration + +1. Create a custom Telegraf configuration file that includes the `outputs.influxdb_v2` + output plugin. _See [Manually configure Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/manual-config/)_ + for more information. +2. Add and customize [Telegraf plugins](/{{< latest "telegraf" >}}/plugins/) and save your changes. +3. [Use the `influx telegrafs create` command](#use-the-influx-cli) to upload your + custom Telegraf configuration to InfluxDB. diff --git a/content/influxdb/v2.5/telegraf-configs/remove.md b/content/influxdb/v2.5/telegraf-configs/remove.md new file mode 100644 index 000000000..d523d4d66 --- /dev/null +++ b/content/influxdb/v2.5/telegraf-configs/remove.md @@ -0,0 +1,53 @@ +--- +title: Remove a Telegraf configuration +description: > + Use the InfluxDB UI or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) + to remove Telegraf configurations from InfluxDB. +weight: 104 +menu: + influxdb_2_5: + name: Remove a config + parent: Telegraf configurations +aliases: + - /influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/delete-telegraf-config/ + - /influxdb/v2.5/collect-data/use-telegraf/auto-config/delete-telegraf-config +--- + +Use the InfluxDB user interface (UI) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) +to remove Telegraf configurations from InfluxDB. + +{{% note %}} +Deleting a Telegraf configuration does not affect _**running**_ Telegraf agents. +However, if an agents stops, it needs a new configuration to start. +{{% /note %}} + +To remove a Telegraf configuration, do one of the following: + +- [Use the InfluxDB UI](#use-the-influxdb-ui) +- [Use the `influx` CLI](#use-the-influx-cli) + +## Use the InfluxDB UI + +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + + {{< nav-icon "load data" >}} + +2. Hover over the configuration you want to delete, click the **{{< icon "trash" >}}** + icon, and then click **Delete**. + + +## Use the influx CLI +Use the [`influx telegrafs rm` command](/influxdb/v2.5/reference/cli/influx/telegrafs/rm/) +to remove a Telegraf configuration from InfluxDB. 
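+
+To find the ID of the configuration you want to remove, list the Telegraf
+configurations stored in InfluxDB:
+
+```sh
+# List Telegraf configurations and their IDs
+influx telegrafs
+```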
+ +Provide the following: + +- **Telegraf configuration ID** (shown in the output of `influx telegrafs`) + +```sh +# Syntax +influx telegrafs rm -i + +# Example +influx telegrafs rm -i 12ab34de56fg78hi +``` diff --git a/content/influxdb/v2.5/telegraf-configs/update.md b/content/influxdb/v2.5/telegraf-configs/update.md new file mode 100644 index 000000000..53a7fa876 --- /dev/null +++ b/content/influxdb/v2.5/telegraf-configs/update.md @@ -0,0 +1,102 @@ +--- +title: Update a Telegraf configuration +description: > + Use the InfluxDB user interface (UI) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) + to update InfluxDB Telegraf configurations. +weight: 103 +menu: + influxdb_2_5: + name: Update a config + parent: Telegraf configurations +aliases: + - /influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/update-telegraf-config/ + - /influxdb/v2.5/collect-data/use-telegraf/auto-config/update-telegraf-config +--- + +Use the InfluxDB user interface (UI) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) +to update InfluxDB Telegraf configurations. + +To update a Telegraf configuration, do one of the following: + +- [Use the InfluxDB UI](#use-the-influxdb-ui) +- [Use the `influx` CLI](#use-the-influx-cli) + +{{% note %}} +Telegraf doesn't detect changes to remote configurations. If you edit a remote configuration, you must restart Telegraf or send it a SIGHUP command for your changes to take effect. +{{% /note %}} + +## Use the InfluxDB UI + +### Update the name or description of a configuration + +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + + {{< nav-icon "load data" >}} + +2. Hover over the configuration you want to edit and click **{{< icon "pencil" >}}** + to update the name or description. +3. Press **Return** or click out of the editable field to save your changes. + +### Edit the configuration file directly in the UI + +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + + {{< nav-icon "load data" >}} + +2. To edit the configuration file: + a. Click the name of the configuration. + b. Add or update [Telegraf plugin settings](/{{< latest "telegraf" >}}/plugins/) in the window that appears. + {{% note %}} + The text editor window doesn't detect if any plugins or settings are misconfigured. Any errors in your configuration that may cause Telegraf to fail when you restart it. + {{% /note %}} + c. Click **Save Changes** and then **Save** again to confirm. +3. To apply the updated configuration, restart Telegraf. To find the exact command to start Telegraf, click **Setup Instructions** on the **Telegraf** page. + +### Download and verify the configuration file + +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + + {{< nav-icon "load data" >}} + +2. Click the **name** of the Telegraf configuration to customize. +3. Click **Download Config** to download the Telegraf configuration file to your + local machine. +4. Review the configuration file. Add or update [Telegraf plugin](/{{< latest "telegraf" >}}/plugins/) settings and + save your changes. +5. [Use the `influx telegrafs update` command](#use-the-influx-cli) to upload your + modified Telegraf configuration to InfluxDB and replace the existing configuration. +6. To apply the updated configuration, restart Telegraf. To find the exact command to start Telegraf, click **Setup Instructions** on the **Telegraf** page. 
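+
+For example, if the Telegraf agent loads its configuration from InfluxDB over HTTP,
+restarting it might look like the following sketch (the configuration ID and API token
+are placeholders; use the exact command shown in **Setup Instructions** for your deployment):
+
+```sh
+# Provide the API token Telegraf uses to authenticate, then restart the agent
+export INFLUX_TOKEN=<your-api-token>
+telegraf --config http://localhost:8086/api/v2/telegrafs/<telegraf-config-id>
+```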
+ +## Use the influx CLI + +Use the [`influx telegrafs update` command](/influxdb/v2.5/reference/cli/influx/telegrafs/update/) +to update an existing InfluxDB Telegraf configuration name, description, or settings +from a Telegraf configuration file on your local machine. + +Provide the following: + +- **Telegraf configuration ID** (shown in the output of `influx telegrafs`) +- **Telegraf configuration name** +- **Telegraf configuration description** +- **Telegraf configuration file** + +{{% warn %}} +If a **name** and **description** are not provided, they are replaced with empty strings. +{{% /warn %}} + + +```sh +# Syntax +influx telegrafs update \ + -i \ + -n \ + -d \ + -f /path/to/telegraf.conf + +# Example +influx telegrafs update \ + -i 12ab34de56fg78hi + -n "Example Telegraf config" + -d "This is a description for an example Telegraf configuration." + -f /path/to/telegraf.conf +``` diff --git a/content/influxdb/v2.5/telegraf-configs/view.md b/content/influxdb/v2.5/telegraf-configs/view.md new file mode 100644 index 000000000..bce03fef7 --- /dev/null +++ b/content/influxdb/v2.5/telegraf-configs/view.md @@ -0,0 +1,45 @@ +--- +title: View Telegraf configurations +description: > + Use the InfluxDB user interface (UI) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) + to view and download InfluxDB Telegraf configurations. +weight: 102 +menu: + influxdb_2_5: + name: View configs + parent: Telegraf configurations +aliases: + - /influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/view-telegraf-config/ + - /influxdb/v2.5/collect-data/use-telegraf/auto-config/view-telegraf-config +--- + +Use the InfluxDB user interface (UI) or the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) +to view and download InfluxDB Telegraf configurations. + +To view Telegraf configurations, do one of the following: + +- [Use the InfluxDB UI](#use-the-influxdb-ui) +- [Use the `influx` CLI](#use-the-influx-cli) + +## Use the InfluxDB UI +In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + +{{< nav-icon "load data" >}} + +### View and download the telegraf.conf +To view the `telegraf.conf` associated with the configuration, +click the **Name** of the configuration. +Then click **Download Config** to download the file. + +### View setup instructions +To view the setup instructions for a Telegraf configuration, click **Setup Instructions**. +Setup instructions include commands for adding your InfluxDB API token +as an environment variable and starting Telegraf with the specific configuration. + +## Use the influx CLI +Use the [`influx telegrafs` command](/influxdb/v2.5/reference/cli/influx/telegrafs/) to +list Telegraf configurations stored in InfluxDB. + +```sh +influx telegrafs +``` diff --git a/content/influxdb/v2.5/tools/_index.md b/content/influxdb/v2.5/tools/_index.md new file mode 100644 index 000000000..61250a701 --- /dev/null +++ b/content/influxdb/v2.5/tools/_index.md @@ -0,0 +1,11 @@ +--- +title: InfluxDB tools and integrations +description: > + Use InfluxDB tools and other third-party integrations to interact with, manage, and visualize data in InfluxDB. 
+weight: 13 +menu: + influxdb_2_5: + name: Tools & integrations +--- + +{{< children >}} diff --git a/content/influxdb/v2.5/tools/chronograf.md b/content/influxdb/v2.5/tools/chronograf.md new file mode 100644 index 000000000..2f1e3d81b --- /dev/null +++ b/content/influxdb/v2.5/tools/chronograf.md @@ -0,0 +1,95 @@ +--- +title: Use Chronograf with InfluxDB OSS +description: > + Chronograf is a data visualization and dashboarding tool designed to visualize data in InfluxDB 1.x. + It is part of the [TICKstack](/platform/) that provides an InfluxQL data explorer, Kapacitor integrations, and more. + Continue to use Chronograf with **InfluxDB Cloud** and **InfluxDB OSS 2.x** and the + [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). +menu: + influxdb_2_5: + name: Use Chronograf + parent: Tools & integrations +weight: 103 +related: + - /{{< latest "chronograf" >}}/ +--- + +[Chronograf](/{{< latest "chronograf" >}}/) is a data visualization and dashboarding +tool designed to visualize data in InfluxDB 1.x. It is part of the [TICKstack](/platform/) +that provides an InfluxQL data explorer, Kapacitor integrations, and more. +Continue to use Chronograf with **InfluxDB Cloud** and **InfluxDB OSS {{< current-version >}}** and the +[1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/). + + +## Create an InfluxDB connection +1. In Chronograf, click **Configuration** in the left navigation bar, + and then click **{{< icon "plus" >}} Add Connection**. +2. Toggle the **InfluxDB v2 Auth** option at the bottom of the form. + {{< img-hd src="/img/influxdb/2-0-tools-chronograf-v2-auth.png" alt="InfluxDB v2 Auth toggle" />}} +3. Enter your InfluxDB connection credentials: + - **Connection URL:** InfluxDB URL _(see [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/) + or [InfluxDB OSS URLs](/influxdb/%762.1/reference/urls/))_ + + ``` + http://localhost:8086 + ``` + + - **Connection Name:** Name to uniquely identify this connection configuration + - **Organization:** InfluxDB [organization](/influxdb/v2.5/organizations/) + - **Token:** InfluxDB [API token](/influxdb/v2.5/security/tokens/) + - **Telegraf Database Name:** InfluxDB [bucket](/influxdb/v2.5/organizations/buckets/) + Chronograf uses to populate parts of the application, including the Host List page (default is `telegraf`) + - **Default Retention Policy:** default [retention policy](/{{< latest "influxdb" "v1" >}}/concepts/glossary/#retention-policy-rp) + _**(leave blank)**_ + + {{% note %}} +#### DBRPs map to InfluxDB buckets +In InfluxDB {{< current-version >}}, database/retention-policy (DBRP) combinations +are mapped to buckets using the `database-name/retention-policy` naming convention. +**DBRP mappings are required to query InfluxDB {{< current-version >}} using InfluxQL.** + +For information, see [DBRP mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/){{% oss-only %}}.{{% /oss-only %}} +{{% cloud-only %}}and [Create DBRP mappings](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings).{{% /cloud-only %}} + {{% /note %}} + +3. Click **Add Connection**. +4. Select the dashboards you would like to create, and then click **Next**. +5. To configure a Kapacitor connection, provide the necessary credentials, + and then click **Continue**. Otherwise, click **Skip**. + _For information about using Kapacitor with InfluxDB Cloud or InfluxDB OSS {{< current-version >}}, + see [Use Kapacitor with InfluxDB](/influxdb/v2.5/tools/kapacitor/)._ +6. Click **Finish**. 
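+
+If you plan to query InfluxDB {{< current-version >}} with InfluxQL from Chronograf,
+make sure a DBRP mapping exists for each database and retention policy you use.
+A minimal sketch using the `influx` CLI (the database, retention policy, and bucket ID
+below are example values; `telegraf` matches the default Telegraf database name above
+and `autogen` is the InfluxDB 1.x default retention policy):
+
+```sh
+# Map a 1.x-style database/retention policy combination to an existing bucket
+influx v1 dbrp create \
+  --db telegraf \
+  --rp autogen \
+  --bucket-id 00xX00o0X001 \
+  --default
+```
+
+Repeat this for each database and retention policy combination that Chronograf queries.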
+ +## Important notes + +- [Update upgraded InfluxDB connections](#Update-upgraded-InfluxDB-connections) +- [No administrative functionality](#No-administrative-functionality) +- [Limited InfluxQL support](#Limited-InfluxQL-support) + +### Update upgraded InfluxDB connections +If using Chronograf with an InfluxDB instance that was upgraded from 1.x +to 2.x, update your InfluxDB connection configuration in Chronograf to use the +**InfluxDB v2 Auth** option and provide an organization and a token. +**Without an organization, Chronograf cannot use Flux to query InfluxDB.** + +### No administrative functionality +Chronograf cannot be used for administrative tasks in InfluxDB Cloud and InfluxDB OSS {{< current-version >}}. +For example, you **cannot** do the following: + +- Define databases +- Modify retention policies +- Add users +- Kill queries + +When connected to an InfluxDB Cloud or InfluxDB {{< current-version >}} database, functionality in the +**{{< icon "crown" >}} InfluxDB Admin** section of Chronograf is disabled. + +To complete administrative tasks, use the following: + +- **InfluxDB user interface (UI)** +- [InfluxDB CLI](/influxdb/v2.5/reference/cli/influx/) +- [InfluxDB v2 API](/influxdb/v2.5/reference/api/) + +### Limited InfluxQL support +InfluxDB Cloud and InfluxDB OSS {{< current-version >}} support InfluxQL **read-only** queries. +For more information, see [InfluxQL support](/influxdb/v2.5/query-data/influxql/#influxql-support). diff --git a/content/influxdb/v2.5/tools/flux-repl.md b/content/influxdb/v2.5/tools/flux-repl.md new file mode 100644 index 000000000..5a1e04d6b --- /dev/null +++ b/content/influxdb/v2.5/tools/flux-repl.md @@ -0,0 +1,61 @@ +--- +title: Use the Interactive Flux REPL +description: > + Use the Flux REPL (Read–Eval–Print Loop) to execute Flux scripts and interact + with InfluxDB and other data sources. +influxdb/v2.5/tags: [flux] +menu: + influxdb_2_5: + name: Use the Flux REPL + parent: Tools & integrations +weight: 103 +aliases: + - /influxdb/v2.5/tools/repl/ +--- + +Use the Flux REPL (Read–Eval–Print Loop) to execute Flux scripts and interact with InfluxDB and other data sources. +[Build the REPL](#build-the-repl) from the Flux source code. + +{{% note %}} +Flux REPL supports running Flux scripts against InfluxDB 1.8+. +{{% /note %}} + +## Build the REPL + +To use the Flux REPL, build it from source using the [Flux repository](https://github.com/influxdata/flux/). +For instructions, see the [Flux repository README](https://github.com/influxdata/flux/#requirements). + +## Use the REPL + +- [Open a REPL session](#open-a-repl-session) +- [Query data from InfluxDB](#query-data-from-influxdb) +- [Multi-line entries](#multi-line-entries) +- [Exit the REPL](#exit-the-repl) + +### Open a REPL session +To open a new REPL session, run: + +```sh +./flux repl +``` + +### Query data from InfluxDB +To query data from InfluxDB (local or remote), provide the host, organization, and token parameters +to the [`from()` function](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/from/). + +```js +from( + bucket: "example-bucket", + host: "http://localhost:8086", + org: "example-org", + token: "My5uP3rS3cRetT0k3n", +) +``` + +### Multi-line entries +Multi-line scripts like the example above work when pasted into the REPL. +Pasting newlines from the clipboard is allowed. +However, you cannot enter newline characters directly from the keyboard. + +### Exit the REPL +Exit the REPL by pressing **Control + D**. 
diff --git a/content/influxdb/v2.5/tools/flux-vim-lsp.md b/content/influxdb/v2.5/tools/flux-vim-lsp.md new file mode 100644 index 000000000..d5ccf8997 --- /dev/null +++ b/content/influxdb/v2.5/tools/flux-vim-lsp.md @@ -0,0 +1,111 @@ +--- +title: Use the Flux LSP with Vim +description: > + Use the Flux LSP with Vim to add auto-completion, syntax checking, and other language-specific features to your editor. +menu: + influxdb_2_5: + parent: Tools & integrations +weight: 102 +--- + +## Requirements + +- Vim 8+ +- [npm](https://www.npmjs.com/get-npm) + +## Install the Flux plugin + +There are many ways to install and manage Vim plugins. +We recommend either of the following two methods: + +- [Install with vim-lsp](#install-with-vim-lsp) +- [Install with vim-coc](#install-with-vim-coc) + +Both methods require you to add the following to your `.vimrc` so that Vim can recognize the `.flux` file type: + +``` +" Flux file type +au BufRead,BufNewFile *.flux set filetype=flux +``` + +### Install with vim-lsp + +1. **Install `flux-lsp-cli` with npm** + + ```sh + npm i -g @influxdata/flux-lsp-cli + ``` + +2. **Install [vim-lsp](https://github.com/prabirshrestha/vim-lsp)** + + If it doesn't exist yet, create a directory called `pack/$USER/start/` in your `~/.vim/` and clone `vim-lsp` into it: + + ```sh + cd ~ + mkdir -p .vim/pack/$USER/start/ + cd .vim/pack/$USER/start/ + git clone https://github.com/prabirshrestha/vim-lsp + ``` + +3. **Edit your `.vimrc`** + + Next, edit your `.vimrc` configuration file to include the following: + + ``` + let g:lsp_diagnostics_enabled = 1 + + if executable('flux-lsp') + au User lsp_setup call lsp#register_server({ + \ 'name': 'flux lsp', + \ 'cmd': {server_info->[&shell, &shellcmdflag, 'flux-lsp']}, + \ 'whitelist': ['flux'], + \ }) + endif + + autocmd FileType flux nmap gd (lsp-definition) + ``` + +### Install with vim-coc + +1. **Install `flux-lsp-cli` from npm** + + ```sh + npm i -g @influxdata/flux-lsp-cli + ``` +2. **Install plug-vim** + + [Install plug-vim](https://github.com/junegunn/vim-plug#installation), a plugin manager for Vim. + +3. **Install vim-coc** + + [Install vim-coc](https://github.com/neoclide/coc.nvim#quick-start), a code-completion plugin for Vim. + +4. **Configure vim-coc** + + vim-coc uses a `coc-settings.json` located in your `~/.vim/` directory. + To run the Flux LSP, add the Flux section under `languageserver`: + + ```json + { + "languageserver": { + "flux": { + "command": "flux-lsp", + "filetypes": ["flux"] + } + } + } + ``` + + To debug flux-lsp, configure it to log to `/tmp/fluxlsp`: + + ```json + { + "languageserver": { + "flux": { + "command": "flux-lsp", + "args": ["-l", "/tmp/fluxlsp"], + "filetypes": ["flux"] + } + } + } + ``` diff --git a/content/influxdb/v2.5/tools/flux-vscode.md b/content/influxdb/v2.5/tools/flux-vscode.md new file mode 100644 index 000000000..f771cd581 --- /dev/null +++ b/content/influxdb/v2.5/tools/flux-vscode.md @@ -0,0 +1,96 @@ +--- +title: Use the Flux VS Code extension +seotitle: Use the Flux Visual Studio Code extension +description: > + The [Flux Visual Studio Code (VS Code) extension](https://marketplace.visualstudio.com/items?itemName=influxdata.flux) + provides Flux syntax highlighting, autocompletion, and a direct InfluxDB OSS server + integration that lets you run Flux scripts natively and show results in VS Code. 
+weight: 103 +menu: + influxdb_2_5: + name: Use the Flux VS Code extension + parent: Tools & integrations +--- + +The [Flux Visual Studio Code (VS Code) extension](https://marketplace.visualstudio.com/items?itemName=influxdata.flux) +provides Flux syntax highlighting, autocompletion, and a direct InfluxDB server +integration that lets you run Flux scripts natively and show results in VS Code. + +##### On this page +- [Install the Flux VS Code extension](#install-the-flux-vs-code-extension) +- [Connect to InfluxDB](#connect-to-influxdb) + - [Manage InfluxDB connections](#manage-influxdb-connections) +- [Query InfluxDB from VS Code](#query-influxdb-from-vs-code) +- [Explore your schema](#explore-your-schema) +- [Debug Flux queries](#debug-flux-queries) +- [Upgrade the Flux extension](#upgrade-the-flux-extension) +- [Flux extension commands](#flux-extension-commands) + +## Install the Flux VS Code extension +The Flux VS Code extension is available in the **Visual Studio Marketplace**. +For information about installing extensions from the Visual Studio marketplace, +see the [Extension Marketplace documentation](https://code.visualstudio.com/docs/editor/extension-gallery). + +Once installed, open the **Explorer** area of your VS Code user interface. +A new **InfluxDB** pane is available below your file explorer. + +{{< img-hd src="/img/influxdb/2-1-tools-vsflux-influxdb-pane.png" alt="InfluxDB pane in VS Code" />}} + +## Connect to InfluxDB +To create an InfluxDB connection in VS Code: + +1. Hover over the **InfluxDB** pane and then click the **{{< icon "plus" >}}** icon that appears. + + {{< img-hd src="/img/influxdb/2-1-tools-vsflux-add-connection.png" alt="Add an InfluxDB connection in VS Code" />}} + +2. Provide the required connection credentials: + - **Type:** type of InfluxDB data source. Select **InfluxDB v2**. + - **Name:** unique identifier for your InfluxDB connection. + - **Hostname and Port:** InfluxDB host and port + (see [InfluxDB OSS URLs](/influxdb/v2.5/reference/urls/) or [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)). + - **Token:** InfluxDB [API token](/influxdb/v2.5/security/tokens/). + - **Organization:** InfluxDB organization name. +3. Click **Test** to test the connection. +4. Once tested successfully, click **Save**. + +### Manage InfluxDB connections +In the **InfluxDB** pane: + +- **To edit a connection**, right click on the connection to edit and select **Edit Connection**. +- **To remove a connection**, right click on the connection to remove and select **Remove Connection**. +- **To switch to a connection**, right click on the connection to switch to and select **Switch To This Connection**. + +## Query InfluxDB from VS Code +1. Write your Flux query in a new VS Code file. +2. Save your Flux script with the `.flux` extension or set the + [VS Code Language Mode](https://code.visualstudio.com/docs/languages/overview#_changing-the-language-for-the-selected-file) to **Flux**. +3. Press {{< keybind mac="fn + F5" other="F5" >}} to execute the query. +4. VS Code displays a list of InfluxDB connection configurations. + Select which InfluxDB connection to use to execute the query. +5. Query results appear in a new tab. If query results do not appear, see [Debug Flux queries](#debug-flux-queries). + +## Explore your schema +After you've configured an InfluxDB connection, VS Code provides an overview of buckets, +measurements, and tags in your InfluxDB organization. +Use the **InfluxDB** pane in VS code to explore your schema. 
+ +{{< img-hd src="/img/influxdb/2-0-tools-vsflux-explore-schema.png" alt="Explore your InfluxDB schema in VS Code" />}} + +## Debug Flux queries +To view errors returned from Flux script executions, click the **Errors and Warnings** +icons in the bottom left of your VS Code window, and then select the **Output** tab in the debugging pane. + +{{< img-hd src="/img/influxdb/2-0-tools-vsflux-errors-warnings.png" alt="VS Code errors and warnings"/>}} + +## Upgrade the Flux extension +VS Code auto-updates extensions by default, but you are able to disable auto-update. +If you disable auto-update, [manually update your VS Code Flux extension](https://code.visualstudio.com/docs/editor/extension-gallery#_update-an-extension-manually). +After updating the extension, reload your VS Code window ({{< keybind mac="⇧⌘P" other="Ctrl+Shift+P" >}}, +and then `Reload Window`) to initialize the updated extensions. + +## Flux extension commands + +| Command | Description | +| :--------------------- | :------------- | +| `influxdb.refresh` | Refresh | +| `influxdb.addInstance` | Add Connection | diff --git a/content/influxdb/v2.5/tools/google-data-studio.md b/content/influxdb/v2.5/tools/google-data-studio.md new file mode 100644 index 000000000..68afab3e1 --- /dev/null +++ b/content/influxdb/v2.5/tools/google-data-studio.md @@ -0,0 +1,35 @@ +--- +title: Use the Google Data Studio connector +description: > + The [InfluxDB Google Data Studio connector](https://datastudio.google.com/u/0/datasources/create?connectorId=AKfycbwhJChhmMypQvNlihgRJMAhCb8gaM3ii9oUNWlW_Cp2PbJSfqeHfPyjNVp15iy9ltCs) + lets you create reports and dashboards in Google Data Studio with data from + InfluxDB Cloud or InfluxDB OSS 2.x. +menu: + influxdb_2_5: + parent: Tools & integrations +weight: 104 +influxdb/v2.5/tags: [google] +--- + +The [InfluxDB Google Data Studio connector](https://datastudio.google.com/u/0/datasources/create?connectorId=AKfycbwhJChhmMypQvNlihgRJMAhCb8gaM3ii9oUNWlW_Cp2PbJSfqeHfPyjNVp15iy9ltCs) lets you create reports and dashboards in Google Data Studio with data from **InfluxDB Cloud** or **InfluxDB OSS {{< current-version >}}**. The connector supports one measurement per organization. + +### Add the InfluxDB Connector to Data Studio + +1. Add the [InfluxDB Connector data source](https://datastudio.google.com/u/0/datasources/create?connectorId=AKfycbwhJChhmMypQvNlihgRJMAhCb8gaM3ii9oUNWlW_Cp2PbJSfqeHfPyjNVp15iy9ltCs). +2. Enter the following connection details: + - `InfluxDB URL`: Your [InfluxDB URL](/influxdb/v2.5/reference/urls/). + - `Token`: Your [API token](/influxdb/v2.5/security/tokens/create-token/) with permission to read from the bucket you're using. + - `Organization`: Your [organization name](/influxdb/v2.5/organizations/view-orgs). + - `Bucket`: Your [bucket name](/influxdb/v2.5/organizations/buckets/view-buckets/). This is auto-populated when you enter the fields above. + - `Measurement`: The [measurement](/influxdb/v2.5/reference/glossary/#measurement) to connect to. +3. Click **Connect**. + +### Create a report of InfluxDB data in Data Studio + +1. Once you're connected, a list of fields available from your measurement, including the [tag set](/influxdb/v2.5/reference/glossary/#tag-set), [field set](/influxdb/v2.5/reference/glossary/#field-set), and [timestamp](/influxdb/v2.5/reference/glossary/#timestamp) appear. Review the list of fields and [edit as needed](https://support.google.com/datastudio/answer/7000529?hl=en&ref_topic=6370331). +2. 
Click **CREATE REPORT** to [create the report](https://support.google.com/datastudio/topic/6369007?hl=en&ref_topic=6291037). +3. Customize the [visualization in your report](https://support.google.com/datastudio/?hl=en#topic=6291037). + +### Example use case with COVID-19 data + +For an example of how to use this connector, see a [COVID-19 report powered by InfluxDB](https://github.com/influxdata/influxdb-gds-connector/tree/master/examples). diff --git a/content/influxdb/v2.5/tools/grafana.md b/content/influxdb/v2.5/tools/grafana.md new file mode 100644 index 000000000..d3bdb8e6c --- /dev/null +++ b/content/influxdb/v2.5/tools/grafana.md @@ -0,0 +1,357 @@ +--- +title: Use Grafana with InfluxDB OSS +description: > + Use [Grafana](https://grafana.com/) to visualize data from your **InfluxDB** instance. +menu: + influxdb_2_5: + name: Use Grafana + parent: Tools & integrations +weight: 104 +influxdb/v2.5/tags: [grafana] +aliases: + - /influxdb/v2.5/visualize-data/other-tools/grafana/ +related: + - https://grafana.com/docs/, Grafana documentation + - /influxdb/v2.5/query-data/get-started/ +--- + +Use [Grafana](https://grafana.com/) or [Grafana Cloud](https://grafana.com/products/cloud/) +to visualize data from your **InfluxDB {{< current-version >}}** instance. + +{{% note %}} +The instructions in this guide require **Grafana Cloud** or **Grafana v8.0+**. +{{% /note %}} + +1. [Start InfluxDB OSS {{< current-version >}}](/influxdb/v2.5/install/#start-influxdb). +2. [Sign up for Grafana Cloud](https://grafana.com/products/cloud/) or + [download and install Grafana](https://grafana.com/grafana/download). +3. Visit your **Grafana Cloud user interface** (UI) or, if running Grafana locally, + [start Grafana](https://grafana.com/docs/grafana/latest/installation/) and visit + `http://localhost:3000` in your browser. +4. In the left navigation of the Grafana UI, hover over the gear + icon to expand the **Configuration** section. Click **Data Sources**. +5. Click **Add data source**. +6. Select **InfluxDB** from the list of available data sources. +7. On the **Data Source configuration page**, enter a **name** for your InfluxDB data source. +8. Under **Query Language**, select one of the following: + +{{< tabs-wrapper >}} +{{% tabs %}} +[Flux](#) +[InfluxQL](#) +{{% /tabs %}} +{{% tab-content %}} +## Configure Grafana to use Flux + +With **Flux** selected as the query language in your InfluxDB data source, +configure your InfluxDB connection: + +1. Under **HTTP**, enter the following: + + - **URL**: Your + {{% oss-only %}}[InfluxDB URL](/influxdb/v2.5/reference/urls/).{{% /oss-only %}} + {{% cloud-only %}}[InfluxDB Cloud region URL](/influxdb/v2.5/reference/regions/).{{% /cloud-only %}} + + ```sh + http://localhost:8086/ + ``` + + - **Access**: Server (default) + +2. Under **InfluxDB Details**, enter the following: + + - **Organization**: Your InfluxDB [organization name **or** ID](/influxdb/v2.5/organizations/view-orgs/). + - **Token**: Your InfluxDB [API token](/influxdb/v2.5/security/tokens/). + - **Default Bucket**: The default [bucket](/influxdb/v2.5/organizations/buckets/) to use in Flux queries. + - **Min time interval**: The [Grafana minimum time interval](https://grafana.com/docs/grafana/latest/features/datasources/influxdb/#min-time-interval). + Default is `10s` + - **Max series**: The maximum number of series or tables Grafana will process. + Default is `1000`. + +3. Click **Save & Test**. 
Grafana attempts to connect to the InfluxDB {{< current-version >}} + datasource and returns the results of the test. + +{{% cloud-only %}} + {{< img-hd src="/img/influxdb/cloud-tools-grafana.png" alt="Use Grafana with InfluxDB Cloud and Flux" />}} +{{% /cloud-only %}} + +{{< oss-only >}} + {{< img-hd src="/img/influxdb/2-2-tools-grafana.png" alt="Use Grafana with InfluxDB and Flux" />}} +{{< /oss-only >}} + +{{% /tab-content %}} + + +{{% tab-content %}} + +## Configure Grafana to use InfluxQL + + + + +{{% oss-only %}} + +To query InfluxDB {{< current-version >}} with InfluxQL, find your use case below, +and then complete the instructions to configure Grafana: + +- [Installed a new InfluxDB {{< current-version >}} instance](#installed-a-new-influxdb-instance) +- [Upgraded from InfluxDB 1.x to {{< current-version >}} (following the official upgrade)](#upgraded-from-influxdb-1x-to-2x) +- [Manually migrated from InfluxDB 1.x to {{< current-version >}}](#manually-migrated-from-influxdb-1x-to-2x) + +### Installed a new InfluxDB instance +To configure Grafana to use InfluxQL with a new install of InfluxDB {{< current-version >}}, do the following: + +1. [Authenticate with InfluxDB {{< current-version >}} tokens](/influxdb/v2.5/security/tokens/). +2. [Manually create DBRP mappings](#view-and-create-influxdb-dbrp-mappings). + +### Upgraded from InfluxDB 1.x to 2.x +To configure Grafana to use InfluxQL when you've upgraded from InfluxDB 1.x to +InfluxDB {{< current-version >}} (following an [official upgrade guide](/influxdb/v2.5/upgrade/v1-to-v2/)): + +1. Authenticate using the _non-admin_ [v1 compatible authentication credentials](#view-and-create-influxdb-v1-authorizations) + created during the upgrade process. +2. Use the DBRP mappings InfluxDB automatically created in the upgrade process (no action necessary). + +### Manually migrated from InfluxDB 1.x to 2.x +To configure Grafana to use InfluxQL when you've manually migrated from InfluxDB +1.x to InfluxDB {{< current-version >}}, do the following: + +1. If your InfluxDB 1.x instance required authentication, + [create v1 compatible authentication credentials](#view-and-create-influxdb-v1-authorizations) + to match your previous 1.x username and password. + Otherwise, use [InfluxDB v2 token authentication](/influxdb/v2.5/security/tokens/). +2. [Manually create DBRP mappings](#view-and-create-influxdb-dbrp-mappings). + +{{< expand-wrapper >}} +{{% expand "View and create InfluxDB v1 authorizations" %}} + +InfluxDB {{< current-version >}} provides a 1.x compatible authentication API that lets you +authenticate with a username and password like InfluxDB 1.x +_(separate from the credentials used to log into the InfluxDB user interface)_. + +#### View existing v1 authorizations +Use the [`influx v1 auth list`](/influxdb/v2.5/reference/cli/influx/v1/auth/list/) +to list existing InfluxDB v1 compatible authorizations. + +```sh +influx v1 auth list +``` + +#### Create a v1 authorization +Use the [`influx v1 auth create` command](/influxdb/v2.5/reference/cli/influx/v1/auth/create/) +to grant read/write permissions to specific buckets. 
Provide the following: + +- [bucket IDs](/influxdb/v2.5/organizations/buckets/view-buckets/) to grant read + or write permissions to +- new username +- new password _(when prompted)_ + + +```sh +influx v1 auth create \ + --read-bucket 00xX00o0X001 \ + --write-bucket 00xX00o0X001 \ + --username example-user +``` +{{% /expand %}} +{{< expand "View and create InfluxDB DBRP mappings" >}} + +When using InfluxQL to query InfluxDB, the query must specify a database and a retention policy. +InfluxDB DBRP mappings associate database and retention policy combinations with +InfluxDB {{< current-version >}} [buckets](/influxdb/v2.5/reference/glossary/#bucket). + +DBRP mappings do not affect the retention period of the target bucket. +These mappings allow queries following InfluxDB 1.x conventions to successfully +query InfluxDB {{< current-version >}} buckets. + +#### View existing DBRP mappings +Use the [`influx v1 dbrp list`](/influxdb/v2.5/reference/cli/influx/v1/dbrp/list/) +to list existing DBRP mappings. + +```sh +influx v1 dbrp list +``` + +#### Create a DBRP mapping +Use the [`influx v1 dbrp create` command](/influxdb/v2.5/reference/cli/influx/v1/dbrp/create/) +command to create a DBRP mapping. +Provide the following: + +- database name +- retention policy name _(not retention period)_ +- [bucket ID](/influxdb/v2.5/organizations/buckets/view-buckets/) +- _(optional)_ `--default` flag if you want the retention policy to be the default retention + policy for the specified database + +```sh +influx v1 dbrp create \ + --db example-db \ + --rp example-rp \ + --bucket-id 00xX00o0X001 \ + --default +``` + +{{% note %}} +#### Repeat for each DBRP combination +Each unique database and retention policy combination used by Grafana must be +mapped to an InfluxDB {{< current-version >}} bucket. +If you have multiple retention policies for a single bucket, set one of the the +retention polices as the default using the `--default` flag. +{{% /note %}} + +_For more information about DBRP mapping, see [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)._ + +{{< /expand >}} +{{< /expand-wrapper >}} + +{{% /oss-only %}} + + + + + + + +{{% cloud-only el="div" %}} + +To query InfluxDB Cloud from Grafana using InfluxQL: + +1. [Download and set up the `influx` CLI](#download-and-set-up-the-influx-cli) +2. [Create an InfluxDB DBRP mapping](#create-an-influxdb-dbrp-mapping) +3. [Configure your InfluxDB connection](#configure-your-influxdb-connection) + +### Download and set up the influx CLI +1. [Download the latest version of the `influx` CLI](/influxdb/cloud/sign-up/#optional-download-install-and-use-the-influx-cli) + appropriate for your local operating system. +2. Create a CLI configuration that provides the required InfluxDB Cloud **host**, + **organization**, and **API token** to all CLI commands. + Use the [`influx config create` command](/influxdb/cloud/reference/cli/influx/config/create/) + and provide the following: + + - [InfluxDB Cloud URL](/influxdb/cloud/reference/regions/) + - [organization name](/influxdb/cloud/organizations/) _(by default, your email address)_ + - [API token](/influxdb/cloud/security/tokens/) + + ```sh + influx config create \ + --config-name example-config-name \ + --host-url https://cloud2.influxdata.com \ + --org example-org \ + --token My5uP3rSeCr37t0k3n + ``` + + For more information about `influx` CLI configurations, + see [`influx config`](/influxdb/cloud/reference/cli/influx/config/). 
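+
+To confirm that the new configuration is active and that the CLI can reach your
+InfluxDB Cloud instance, you can optionally run the following commands:
+
+```sh
+# List CLI configurations and verify the new configuration is active
+influx config ls
+
+# Check that the configured host is reachable
+influx ping
+```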
+ +### Create an InfluxDB DBRP mapping +When using InfluxQL to query InfluxDB Cloud, the query must specify a database and a retention policy. +Use the [`influx v1 dbrp create` command](/influxdb/cloud/reference/cli/influx/v1/dbrp/create/) +command to create a database/retention policy (DBRP) mapping that associates a database +and retention policy combination with an InfluxDB Cloud [bucket](/influxdb/cloud/reference/glossary/#bucket). + +DBRP mappings do not affect the retention period of the target bucket. +These mappings allow queries following InfluxDB 1.x conventions to successfully +query InfluxDB Cloud buckets. + +{{% note %}} +##### Automatically create DBRP mappings on write +When using the InfluxDB 1.x compatibility API to write data to InfluxDB Cloud, +InfluxDB Cloud automatically creates DBRP mappings for buckets whose names match the +`db/rp` naming pattern of the database and retention policy specified in the write request. +For more information, see [Database and retention policy mapping – Writing data](/influxdb/cloud/reference/api/influxdb-1x/dbrp/#when-writing-data). +{{% /note %}} + +Provide the following: + +- database name +- [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name _(not retention period)_ +- [bucket ID](/influxdb/cloud/organizations/buckets/view-buckets/) +- _(optional)_ `--default` flag if you want the retention policy to be the default retention + policy for the specified database + +```sh +influx v1 dbrp create \ + --db example-db \ + --rp example-rp \ + --bucket-id 00xX00o0X001 \ + --default +``` + +{{% note %}} +#### Repeat for each DBRP combination +Each unique database and retention policy combination used by Grafana must be +mapped to an InfluxDB {{< current-version >}} bucket. +If you have multiple retention policies for a single bucket, set one of the the +retention polices as the default using the `--default` flag. +{{% /note %}} + +_For more information about DBRP mapping, see [Database and retention policy mapping](/influxdb/cloud/reference/api/influxdb-1x/dbrp/)._ + +{{% /cloud-only %}} + + + + + +### Configure your InfluxDB connection +With **InfluxQL** selected as the query language in your InfluxDB data source settings: + +1. Under **HTTP**, enter the following: + + - **URL**: Your [InfluxDB URL](/influxdb/v2.5/reference/urls/). + + ```sh + http://localhost:8086/ + ``` + - **Access**: Server (default) + +2. Configure InfluxDB authentication: + + - ##### Token authentication + + - Under **Custom HTTP Headers**, select **{{< icon "plus" >}}Add Header**. Provide your InfluxDB API token: + + - **Header**: Enter `Authorization` + - **Value**: Use the `Token` schema and provide your [InfluxDB API token](/influxdb/v2.5/security/tokens/). 
+ For example: + + ``` + Token y0uR5uP3rSecr3tT0k3n + ``` + + - Under **InfluxDB Details**, do the following: + + - **Database**: Enter the database name [mapped to your InfluxDB {{< current-version >}} bucket](#view-and-create-influxdb-dbrp-mappings) + - **HTTP Method**: Select **GET** + + - ##### Authenticate with username and password + + Under **InfluxDB Details**, do the following: + + - **Database**: Enter the database name [mapped to your InfluxDB {{< current-version >}} bucket](#view-and-create-influxdb-dbrp-mappings) + - **User**: Enter the username associated with your [InfluxDB 1.x compatibility authorization](#view-and-create-influxdb-v1-authorizations) + - **Password**: Enter the password associated with your [InfluxDB 1.x compatibility authorization](#view-and-create-influxdb-dbrp-mappings) + - **HTTP Method**: Select **GET** + +3. Click **Save & Test**. Grafana attempts to connect to the InfluxDB {{< current-version >}} data source + and returns the results of the test. + +{{% cloud-only %}} + {{< img-hd src="/img/influxdb/cloud-tools-grafana-influxql.png" alt="Use Grafana with InfluxDB Cloud and Flux" />}} +{{% /cloud-only %}} + +{{< oss-only >}} + {{< img-hd src="/img/influxdb/2-2-tools-grafana-influxql.png" alt="Use Grafana with InfluxDB and Flux" />}} +{{< /oss-only >}} + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +## Query and visualize data + +With your InfluxDB connection configured, use Grafana and Flux to query and +visualize time series data stored in your **InfluxDB** instance. + +For more information about using Grafana, see the [Grafana documentation](https://grafana.com/docs/). +If you're just learning Flux, see [Get started with Flux](/{{< latest "flux" >}}/get-started/). diff --git a/content/influxdb/v2.5/tools/influx-cli.md b/content/influxdb/v2.5/tools/influx-cli.md new file mode 100644 index 000000000..b1d14fa21 --- /dev/null +++ b/content/influxdb/v2.5/tools/influx-cli.md @@ -0,0 +1,257 @@ +--- +title: Install and use the influx CLI +description: + Use the `influx` and `influxd` command line interfaces to interact with and + manage InfluxDB. +menu: + influxdb_2_5: + name: Use the influx CLI + parent: Tools & integrations + identifier: influx-cli-task-based +weight: 101 +influxdb/v2.5/tags: [cli] +aliases: + - /influxdb/v2.5/tools/clis/ +related: + - /influxdb/v2.5/reference/cli/influx/ +--- + +Use the `influx` CLI to interact with and manage your +InfluxDB {{% cloud-only %}}Cloud{{% /cloud-only %}} instance. +Write and query data, generate InfluxDB templates, export data, and more. + +{{% oss-only %}} + +{{% note %}} +The [`influx` CLI](/influxdb/v2.5/reference/cli/influx) is packaged and versioned +separately from the InfluxDB server (`influxd`). +{{% /note %}} + +{{% /oss-only %}} + +- [Install the influx CLI](#install-the-influx-cli) +- [Set up the influx CLI](#set-up-the-influx-cli) +- [Use influx CLI commands](#use-influx-cli-commands) + +## Install the influx CLI + +{{< tabs-wrapper >}} +{{% tabs %}} +[macOS](#) +[Linux](#) +[Windows](#) +{{% /tabs %}} + + +{{% tab-content %}} + +Do one of the following: + +- [Use Homebrew](#use-homebrew) +- [Manually download and install](#manually-download-and-install) + +### Use Homebrew +```sh +brew install influxdb-cli +``` + +{{% oss-only %}} + +{{% note %}} +If you used Homebrew to install **InfluxDB v{{< current-version >}}**, the `influxdb-cli` +formula was downloaded as a dependency and should already be installed. 
+If installed, `influxdb-cli` will appear in the output of the following command: + +```sh +brew list | grep influxdb-cli +``` +{{% /note %}} + +{{% /oss-only %}} + +### Manually download and install + +1. **Download the `influx` CLI package.** + + influx CLI v{{< latest-patch cli=true >}} (macOS) + +2. **Unpackage the downloaded package.** + + Do one of the following: + + - Double-click the downloaded package file in **Finder**. + - Run the following command in a macOS command prompt application such + **Terminal** or **[iTerm2](https://www.iterm2.com/)**: + + ```sh + # Unpackage contents to the current working directory + tar zxvf ~/Downloads/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64.tar.gz + ``` + +3. **(Optional) Place the binary in your `$PATH`.** + + ```sh + # (Optional) Copy the influx binary to your $PATH + sudo cp ~/Downloads/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64/influx /usr/local/bin/ + ``` + + If you do not move the `influx` binary into your `$PATH`, prefix the executable + `./` to run it in place. + +4. **(macOS Catalina and newer) Authorize the `influx` binary.** + + macOS requires downloaded binaries to be signed by registered Apple developers. + When you first attempt to run `influx`, macOS will prevent it from running. + To authorize the `influx` binary: + + 1. Attempt to run an `influx` command. + 2. Open **System Preferences** and click **Security & Privacy**. + 3. Under the **General** tab, there is a message about `influx` being blocked. + Click **Open Anyway**. + +{{% /tab-content %}} + + + +{{% tab-content %}} + +1. **Download the influx CLI package.** + + Download the `influx` CLI package [from your browser](#download-from-your-browser) + or [from the command line](#download-from-the-command-line). + + #### Download from your browser + + influx CLI v{{< latest-patch cli=true >}} (amd64) + influx CLI v{{< latest-patch cli=true >}} (arm) + + #### Download from the command line + + ```sh + # amd64 + wget https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64.tar.gz + + # arm + wget https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64.tar.gz + ``` + +4. **Unpackage the downloaded package.** + + _**Note:** The following commands are examples. Adjust the filenames, paths, and utilities if necessary._ + + ```sh + # amd64 + tar xvzf path/to/influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64.tar.gz + + # arm + tar xvzf path/to/influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64.tar.gz + ``` + +3. **(Optional) Place the unpackaged `influx` executable in your system `$PATH`.** + + ```sh + # amd64 + sudo cp influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64/influx /usr/local/bin/ + + # arm + sudo cp influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64/influx /usr/local/bin/ + ``` + + If you do not move the `influx` binary into your `$PATH`, prefix the executable + `./` to run it in place. + +{{% /tab-content %}} + + + +{{% tab-content %}} + +1. **Download the `influx` CLI package.** + + influx CLI v{{< latest-patch cli=true >}} (Windows) + +2. **Expand the downloaded archive.** + + Expand the downloaded archive into `C:\Program Files\InfluxData\` and rename it if desired. 
+ + ```powershell + > Expand-Archive .\influxdb2-client-{{< latest-patch cli=true >}}-windows-amd64.zip -DestinationPath 'C:\Program Files\InfluxData\' + > mv 'C:\Program Files\InfluxData\influxdb2-client-{{< latest-patch cli=true >}}-windows-amd64' 'C:\Program Files\InfluxData\influx' + ``` + +3. **Grant network access to the `influx` CLI.** + + When using the `influx` CLI for the first time, **Windows Defender** displays + the following message: + + > Windows Defender Firewall has blocked some features of this app. + + To grant the `influx` CLI the required access, do the following: + + 1. Select **Private networks, such as my home or work network**. + 2. Click **Allow access**. + +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +## Set up the influx CLI + +- [Provide required authentication credentials](#provide-required-authentication-credentials) +- [Enable shell completion (Optional)](#enable-shell-completion-optional) + +### Provide required authentication credentials +To avoid having to pass your InfluxDB **host**, **API token**, and **organization** +with each command, store them in an `influx` CLI configuration (config). +`influx` commands that require these credentials automatically retrieve these +credentials from the active config. + +Use the [`influx config create` command](/influxdb/v2.5/reference/cli/influx/config/create/) +to create an `influx` CLI config and set it as active: + +```sh +influx config create --config-name \ + --host-url http://localhost:8086 \ + --org \ + --token \ + --active +``` + +For more information about managing CLI configurations, see the +[`influx config` documentation](/influxdb/v2.5/reference/cli/influx/config/). + +For instructions on how to create API tokens, see [Create a token](/influxdb/v2.5/security/tokens/create-token/). + +{{% oss-only %}} + +#### Authenticate with a username and password + +The **`influx` CLI 2.4.0+** lets you create connection configurations +that authenticate with **InfluxDB OSS 2.4+** using the username and +password combination that you would use to log into the InfluxDB user interface (UI). +The CLI retrieves a session cookie and stores it, unencrypted, in your +[configs path](/influxdb/v2.5/reference/internals/file-system-layout/#configs-path). + +Use the `--username-password`, `-p` option to provide your username and password +using the `:` syntax. +If no password is provided, the CLI will prompt for a password after each +command that requires authentication. + +```sh +influx config create \ + -n config-name \ + -u http://localhost:8086 \ + -p example-user:example-password \ + -o example-org +``` + +{{% /oss-only %}} + +### Enable shell completion (Optional) + +To install `influx` shell completion scripts, see +[`influx completion`](/influxdb/v2.5/reference/cli/influx/completion/#install-completion-scripts). + +## Use influx CLI commands +_For information about `influx` CLI commands, see the +[`influx` CLI reference documentation](/influxdb/v2.5/reference/cli/influx/)._ diff --git a/content/influxdb/v2.5/tools/influxql-shell.md b/content/influxdb/v2.5/tools/influxql-shell.md new file mode 100644 index 000000000..5a6c43a74 --- /dev/null +++ b/content/influxdb/v2.5/tools/influxql-shell.md @@ -0,0 +1,472 @@ +--- +title: Use the InfluxQL shell +description: > + Use the InfluxQL interactive shell to execute InfluxQL queries and interact with InfluxDB. 
+menu: + influxdb_2_5: + name: Use the InfluxQL shell + parent: Tools & integrations +weight: 104 +influxdb/v2.5/tags: [InfluxQL] +related: + - /influxdb/v2.5/reference/cli/influx/v1/shell/ +--- + +Use the InfluxQL interactive shell to execute InfluxQL queries with InfluxDB. + +- [Map database and retention policies to buckets](#map-database-and-retention-policies-to-buckets) +- [Download and install the influx CLI](#download-and-install-the-influx-cli) +- [Start the InfluxQL shell](#start-the-influxql-shell) +- [Execute InfluxQL queries](#execute-influxql-queries) +- [Use and configure display formats](#use-and-configure-display-formats) +- [InfluxQL shell helper commands](#influxql-shell-helper-commands) + +## Map database and retention policies to buckets + +InfluxQL queries require a database and retention policy to query data. +In InfluxDB {{% current-version %}}, databases and retention policies have been +combined and replaced with [buckets](/influxdb/v2.5/reference/glossary/#bucket). +To use the InfluxQL to query an InfluxDB {{% current-version %}} bucket, first +map your DBRP combinations to an appropriate bucket. + +For information about creating DBRP mappings, see +[Query data with InfluxQL](/influxdb/v2.5/query-data/influxql/). + +## Download and install the influx CLI + +The InfluxQL REPL is included in the **`influx` CLI (2.4+)**. +[Download and install the `influx` CLI](/influxdb/v2.5/tools/influx-cli/#install-the-influx-cli). + +## Start the InfluxQL shell + +Use the [`influx v1 shell` command](/influxdb/v2.5/reference/cli/influx/v1/shell/) +to start an InfluxQL shell session. + +```sh +influx v1 shell +``` + +### Configure your InfluxDB connection + +The `influx v1 shell` command requires the following to connect to InfluxDB: + +- {{% oss-only %}}[InfluxDB host](/influxdb/v2.5/reference/urls/){{% /oss-only %}} + {{% cloud-only %}}[InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /cloud-only %}} +- [Organization name or ID](/influxdb/v2.5/organizations/view-orgs/) +- [API token](/influxdb/v2.5/security/tokens/) + +Use one of the following methods to provide these credentials to the `influx v1 shell` command: + +{{< tabs-wrapper >}} +{{% tabs "small" %}} +[CLI config _(Recommended)_](#) +[Command flags](#) +[Environment variables](#) +{{% /tabs %}} +{{% tab-content %}} + + +The `influx` CLI lets your configure and store multiple sets of connection +credentials to use with commands. +Each set of credentials is a **CLI config**. +Use CLI configs to provide required credentials to the `influx v1 shell` command. + +1. Create a new CLI config and set it to _active_. + + ```sh + influx config create --config-name \ + --host-url http://localhost:8086 \ + --org example-org \ + --token mY5up3Rs3CrE7t0k3N \ + --active + ``` + +2. Start an InfluxQL shell + + ```sh + influx v1 shell + ``` + +All `influx` commands use credentials provided by the active CLI config. +For more information about managing CLI configs, see the +[`influx config` documentation](/influxdb/v2.5/reference/cli/influx/config/). + + +{{% /tab-content %}} +{{% tab-content %}} + + +Use `influx v1 shell` command flags to provide the required credentials: + +```sh +influx v1 shell \ + --host http://localhost:8086 \ + --org example-org \ + --token mY5up3Rs3CrE7t0k3N +``` + + +{{% /tab-content %}} +{{% tab-content %}} + + +Use environment variables to provided the required credentials. 
+The `influx` CLI will automatically use the following environment variables for +required credentials if the environment variables are set: + +- `INFLUX_HOST` +- `INFLUX_ORG` or `INFLUX_ORG_ID` +- `INFLUX_TOKEN` + +```sh +export INFLUX_HOST=http://localhost:8086 +export INFLUX_ORG=example-org +export INFLUX_TOKEN=mY5up3Rs3CrE7t0k3N + +influx v1 shell +``` + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Execute InfluxQL queries + +Within the InfluxQL shell, execute any InfluxQL query supported by InfluxDB {{< current-version >}}. +For information about what queries are supported see +[InfluxQL support in InfluxDB {{< current-version >}}](/influxdb/v2.5/query-data/influxql/#influxql-support). + +View the [InfluxQL documentation (InfluxDB 1.8)](/influxdb/v1.8/query_language/) +for in-depth documentation about the query language. + +## Use and configure display formats + +The InfluxQL shell outputs query results using different display formats. +Use the [`format` helper command](#format) to specify which display format to use. + +{{< tabs-wrapper >}} +{{% tabs %}} +[table _(default)_](#) +[column](#) +[csv](#) +[json](#) +{{% /tabs %}} +{{% tab-content %}} + + +The InfluxQL shell uses the `table` display format by default. +If using another display format and you want to switch back to the `table` format, +run the following _in the InfluxQL shell_. + +```sql +format table +``` + +### Table-formatted results + +The table format outputs results in an interactive table format. + +{{< img-hd src="/img/influxdb/2-4-influxql-shell-table-format.png" alt="InfluxQL shell table display format" />}} + +Results are paginated. +Use `shift + up/down arrow` to navigate between pages. +Use `q` to exit out of the interactive table display. + +### Configure the table display format + +#### Use scientific notation + +To display values using scientific notation, use the [`scientific` helper command](#scientific) +to toggle scientific notation. + +#### Specify timestamp precision or format + +To specify the precision or format of timestamps returned in results, use the +[`precision` helper command](#precision). + +```sql +-- Return results formatted as RFC3339 timestamps +precision rfc3339 + +-- Return results with second-precision unix timestamps +precision s +``` + + + +{{% /tab-content %}} +{{% tab-content %}} + + +To use the `column` format, run the following _in the InfluxQL shell_. + +```sql +format column +``` + +### Column-formatted results + +The `column` format displays results in a text-based column format. + +``` +name: cpu +time usage_user usage_system +---- ---------- ------------ +1.62767581e+09 5.476026754935672 2.5629805588360313 +1.62767581e+09 0.4999999999972715 0.09999999999990905 +1.62767581e+09 18.718718718689555 10.810810810692704 +1.62767581e+09 6.500000000090222 3.2000000000343425 +1.62767581e+09 4.1999999999336435 1.3999999999778812 +1.62767581e+09 7.992007992122577 4.095904095946467 +1.62767581e+09 0.3000000000054934 0.1000000000010732 +``` + +### Configure the column display format + +#### Specify timestamp precision {#specify-timestamp-precision-column} + +To specify the precision or format of timestamps returned in results, use the +[`precision` helper command](#precision). + +```sql +-- Return results formatted as RFC3339 timestamps +precision rfc3339 + +-- Return results with second-precision unix timestamps +precision s +``` + + +{{% /tab-content %}} +{{% tab-content %}} + + +To use the `csv` format, run the following _in the InfluxQL shell_. 
+
+```sql
+format csv
+```
+
+### CSV-formatted results
+
+The `csv` format displays results in CSV format.
+
+```csv
+name,time,usage_user,usage_system
+cpu,1.62767582e+09,4.207038819798416,3.5194098893833914
+cpu,1.62767582e+09,0.19980019980215585,0.19980019980215585
+cpu,1.62767582e+09,14.914914914981258,14.114114114162232
+cpu,1.62767582e+09,5.805805805828698,4.004004003985887
+cpu,1.62767582e+09,2.5025025025339978,1.8018018018273916
+cpu,1.62767582e+09,7.299999999874271,5.699999999930733
+cpu,1.62767582e+09,0.09999999999647116,0.0999999999987449
+```
+
+### Configure the CSV display format
+
+#### Specify timestamp precision {#specify-timestamp-precision-csv}
+
+To specify the precision or format of timestamps returned in results, use the
+[`precision` helper command](#precision).
+
+```sql
+-- Return results formatted as RFC3339 timestamps
+precision rfc3339
+
+-- Return results with second-precision unix timestamps
+precision s
+```
+
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+
+To use the `json` format, run the following _in the InfluxQL shell_.
+
+```sql
+format json
+```
+
+### JSON-formatted results
+
+The `json` format displays results in JSON format.
+
+{{% truncate %}}
+```json
+{
+    "results": [
+        {
+            "series": [
+                {
+                    "columns": [
+                        "time",
+                        "usage_user",
+                        "usage_system"
+                    ],
+                    "name": "cpu",
+                    "values": [
+                        [
+                            1627675850,
+                            4.601935685334947,
+                            4.139868872973054
+                        ],
+                        [
+                            1627675850,
+                            0.3992015968099201,
+                            0.2994011976074401
+                        ],
+                        [
+                            1627675850,
+                            7.599999999947613,
+                            7.299999999995634
+                        ],
+                        [
+                            1627675850,
+                            0.3992015968098205,
+                            0.4990019960088718
+                        ],
+                        [
+                            1627675850,
+                            9.59040959050348,
+                            8.49150849158481
+                        ],
+                        [
+                            1627675850,
+                            0.2997002996974768,
+                            0.39960039959966437
+                        ],
+                        [
+                            1627675850,
+                            9.590409590464631,
+                            8.691308691326773
+                        ]
+                    ]
+                }
+            ],
+            "statement_id": 0
+        }
+    ]
+}
+```
+{{% /truncate %}}
+
+### Configure the JSON display format
+
+#### Pretty print JSON output
+
+By default, the `json` display format returns an unformatted JSON string.
+To format the JSON, use the [`pretty` helper command](#pretty) to toggle JSON
+pretty printing.
+
+#### Specify timestamp precision {#specify-timestamp-precision-json}
+
+To specify the precision or format of timestamps returned in results, use the
+[`precision` helper command](#precision).
+
+```sql
+-- Return results formatted as RFC3339 timestamps
+precision rfc3339
+
+-- Return results with second-precision unix timestamps
+precision s
+```
+
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+## InfluxQL shell helper commands
+
+The InfluxQL shell supports the following helper commands:
+
+- [clear](#clear)
+- [exit](#exit)
+- [format](#format)
+- [gopher](#gopher)
+- [help](#help)
+- [history](#history)
+- [precision](#precision)
+- [pretty](#pretty)
+- [quit](#quit)
+- [scientific](#scientific)
+- [use](#use)
+
+### clear
+
+Clear session-based settings such as the database.
+
+### exit
+
+Exit the InfluxQL shell.
+
+### format
+
+Specify the data display format.
+The InfluxQL shell supports the following display formats:
+
+- csv
+- json
+- column
+- table _(default)_
+
+```sql
+-- Display query output using column display
+format column
+```
+
+For more information, see [Use and configure display formats](#use-and-configure-display-formats).
+
+### gopher
+
+Print the Go gopher.
+
+### help
+
+Print the InfluxQL shell help options.
+
+### history
+
+View the InfluxQL shell history.
+
+### precision
+
+Specify the format or precision of timestamps.
+Use one of the following: + +- rfc3339 +- h +- m +- s +- ms +- u +- ns _(default)_ + +```sql +-- Set timestamp precision to seconds +precision s +``` + +### pretty + +Toggle "pretty print" for the [`json` display format](#format). + +### quit + +Exit the InfluxQL shell + +### scientific + +Toggle scientific number format for the [`table` display format](#format). + +### use + +Set the database and retention policy (optional) to use for queries. + +```sql +-- Use the exampledb database +use exampledb + +-- Use the exampledb database and examplerp retention policy +use exampledb.examplerp +``` diff --git a/content/influxdb/v2.5/tools/kapacitor.md b/content/influxdb/v2.5/tools/kapacitor.md new file mode 100644 index 000000000..39e6cb4c4 --- /dev/null +++ b/content/influxdb/v2.5/tools/kapacitor.md @@ -0,0 +1,143 @@ +--- +title: Use Kapacitor with InfluxDB OSS +description: > + [Kapacitor](/kapacitor/) is a data processing framework that makes it easy to + create alerts, run ETL (Extract, Transform and Load) jobs and detect anomalies. + Use Kapacitor with **InfluxDB OSS 2.x**. +menu: + influxdb_2_5: + name: Use Kapacitor + parent: Tools & integrations +weight: 102 +related: + - /{{< latest "kapacitor" >}}/ +--- + +[Kapacitor](/{{< latest "kapacitor" >}}/) is a data processing framework that makes +it easy to create alerts, run ETL jobs and detect anomalies. +Kapacitor interacts with **InfluxDB Cloud** and **InfluxDB OSS {{< current-version >}}** using the +[InfluxDB 1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/), so +you can continue using Kapacitor without having to migrate libraries of TICKscripts +to InfluxDB tasks. + +{{% note %}} +#### Support for stream tasks +InfluxDB Cloud and InfluxDB OSS {{< current-version >}} do not have subscription APIs and +**do not support Kapacitor stream tasks**, but you can continue to use stream +tasks by writing data directly to Kapacitor. +For more information, see [below](#use-kapacitor-stream-tasks). +{{% /note %}} + +#### On this page +- [Configure Kapacitor to connect to InfluxDB](#configure-kapacitor-to-connect-to-influxdb) +- [Use Kapacitor batch tasks](#use-kapacitor-batch-tasks) +- [Use Kapacitor stream tasks](#use-kapacitor-stream-tasks) +- [Write back to InfluxDB](#write-back-to-influxdb) + +## Configure Kapacitor to connect to InfluxDB +To connect Kapacitor to InfluxDB Cloud or InfluxDB OSS {{< current-version >}}, update the `[[influxdb]]` +section(s) of your [Kapacitor configuration file](/{{< latest "kapacitor" >}}/administration/configuration/#kapacitor-configuration-file): + +- [Specify your InfluxDB URL](#specify-your-influxdb-url) +- [Provide InfluxDB authentication credentials](#provide-influxdb-authentication-credentials) +- [Disable InfluxDB subcriptions](#disable-influxdb-subscriptions) + +### Specify your InfluxDB URL +Provide your InfluxDB URL in the `[[influxdb]].urls` configuration option. +For more information, see [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/) +or [InfluxDB OSS URLs](/influxdb/v2.5/reference/urls/). + +```toml +[[influxdb]] + # ... + urls = ["http://localhost:8086"] +``` + +### Provide InfluxDB authentication credentials +InfluxDB Cloud and InfluxDB OSS {{< current-version >}} require authentication. +Provide the following credentials in your `[[influxdb]].username` and `[[influxdb]].password` +configuration options: + +- **username:** InfluxDB username +- **password:** InfluxDB [API token](/influxdb/v2.5/security/tokens/) + +```toml +[[influxdb]] + # ... 
+  username = "influxdb-username"
+  password = "influxdb-token"
+```
+
+{{% warn %}}
+Kapacitor is subject to InfluxDB token permission restrictions.
+To query or write to an InfluxDB bucket, the InfluxDB token must have read and/or
+write permissions for the target bucket.
+For information about token permissions, see [Create a token](/influxdb/v2.5/security/tokens/create-token/).
+{{% /warn %}}
+
+### Disable InfluxDB subscriptions
+InfluxDB Cloud and InfluxDB OSS {{< current-version >}} do not have subscription APIs.
+Set `[[influxdb]].disable-subscriptions` to `true` to disable InfluxDB subscriptions.
+
+```toml
+[[influxdb]]
+  # ...
+  disable-subscriptions = true
+```
+
+## Use Kapacitor batch tasks
+Kapacitor batch tasks use the `query` endpoint of the 1.x compatibility API
+and require no change to use with InfluxDB Cloud and InfluxDB OSS.
+For information about writing back to InfluxDB in Kapacitor tasks,
+see [Write back to InfluxDB](#write-back-to-influxdb) below.
+
+## Use Kapacitor stream tasks
+InfluxDB Cloud and OSS {{< current-version >}} do not have subscription APIs and do not support Kapacitor stream tasks directly.
+To use Kapacitor stream tasks, write data directly to Kapacitor using the [Kapacitor `write` API](/{{< latest "kapacitor" >}}/working/api/#writing-data). We recommend using the [Telegraf InfluxDB output plugin](/{{< latest "telegraf" >}}/plugins/#output-influxdb) to write data to both InfluxDB Cloud or OSS and Kapacitor.
+
+##### Write data using the Telegraf InfluxDB output plugin
+
+To write data to both InfluxDB and Kapacitor using the InfluxDB output plugin, complete the following steps:
+
+1. [Install Telegraf](/{{< latest "telegraf" >}}/install/).
+2. [Create a DBRP mapping](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings).
+3. In the [Telegraf InfluxDB output plugin](/{{< latest "telegraf" >}}/plugins/#output-influxdb) configuration file, specify the following options, replacing `database`, `retention_policy`, `username`, and `password` to match your DBRP mapping, and set `skip_database_creation` to `true`:
+
+```toml
+# Write to Kapacitor
+[[outputs.influxdb]]
+  urls = ["http://localhost:9092"]
+  database = "example-db"
+  retention_policy = "example-rp"
+
+# Write to InfluxDB Cloud or OSS
+[[outputs.influxdb]]
+  urls = ["http://localhost:8086"]
+  database = "example-db"
+  retention_policy = "example-rp"
+  username = "influxdb-username"
+  password = "influxdb-token"
+  skip_database_creation = true
+```
+
+## Write back to InfluxDB
+If using the Kapacitor `InfluxDBOut` node to write data to InfluxDB {{< current-version >}},
+InfluxDB maps the specified database and retention policy to a corresponding bucket.
+You can also manually map database/retention policy combinations (DBRPs) to buckets.
+For more information, see [DBRP mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/){{% oss-only %}}.{{% /oss-only %}}
+{{% cloud-only %}}and [Create DBRP mappings](/influxdb/v2.5/query-data/influxql/dbrp/#create-dbrp-mappings).{{% /cloud-only %}}
+
+The following example TICKscript writes to the `my-db/my-rp` bucket in
+InfluxDB Cloud or InfluxDB OSS {{< current-version >}}.
+ +```js +batch + |query('SELECT errors / total AS error_percent from requests') + // Write the transformed data to InfluxDB + |influxDBOut() + .database('my-db') + .retentionPolicy('my-rp') + .measurement('errors') + .tag('kapacitor', 'true') + .tag('version', '0.2') +``` diff --git a/content/influxdb/v2.5/upgrade/_index.md b/content/influxdb/v2.5/upgrade/_index.md new file mode 100644 index 000000000..1df670730 --- /dev/null +++ b/content/influxdb/v2.5/upgrade/_index.md @@ -0,0 +1,12 @@ +--- +title: Upgrade InfluxDB +description: > + Upgrade your version of InfluxDB +menu: influxdb_2_5 +weight: 3 +aliases: + - /influxdb/v2.5/reference/upgrading/ +influxdb/v2.5/tags: [upgrade] +--- + +{{< children >}} diff --git a/content/influxdb/v2.5/upgrade/downgrade.md b/content/influxdb/v2.5/upgrade/downgrade.md new file mode 100644 index 000000000..0df3676df --- /dev/null +++ b/content/influxdb/v2.5/upgrade/downgrade.md @@ -0,0 +1,42 @@ +--- +title: Downgrade InfluxDB +description: > + To downgrade from InfluxDB {{< current-version >}} to a previous 2.x version, + use the `influxd downgrade` command to downgrade the metadata schema used by + `influxd` to match the metadata schema of a older release. +menu: + influxdb_2_5: + parent: Upgrade InfluxDB + name: Downgrade InfluxDB +weight: 12 +related: + - /influxdb/v2.5/reference/cli/influxd/downgrade/ +--- + +To downgrade from InfluxDB {{< current-version >}} to a previous 2.x version, +use the `influxd downgrade` command to downgrade the metadata schema used by +`influxd` to match the metadata schema of a older release. + +InfluxDB does not guarantee backwards-compatibility with older releases in its embedded metadata stores. +Attempting to start an older `influxd` binary with a BoltDB or SQLite file that has +been migrated to a newer schema will result in a startup error similar to: + +``` +Error: up: reading migrations: migration "...": migration specification not found +``` + +**To downgrade from InfluxDB {{< current-version >}} to a previous 2.x version**: + +1. Run `influxd downgrade` using the **{{< current-version >}} `influxd` binary**. + Specify the previous InfluxDB version to downgrade to. + For example, to downgrade to InfluxDB 2.0: + + ```sh + influxd downgrade 2.0 + ``` + +2. Install the **earlier version** of InfluxDB: + + {{< influxdb/install-old-versions >}} + +3. Start InfluxDB with the **earlier versioned `influxd` binary**. diff --git a/content/influxdb/v2.5/upgrade/v1-to-v2/_index.md b/content/influxdb/v2.5/upgrade/v1-to-v2/_index.md new file mode 100644 index 000000000..616960b85 --- /dev/null +++ b/content/influxdb/v2.5/upgrade/v1-to-v2/_index.md @@ -0,0 +1,28 @@ +--- +title: Upgrade from InfluxDB 1.x to 2.5 +description: > + Explore different methods for upgrading from InfluxDB 1.x to InfluxDB 2.5 and + choose the best one for your use case. +menu: + influxdb_2_5: + parent: Upgrade InfluxDB + name: InfluxDB 1.x to 2.5 +weight: 11 +--- + +Explore different methods for upgrading from InfluxDB 1.x to InfluxDB {{< current-version >}} and +determine which best suits your use case. +Consider the following: + +#### Do you want to migrate all your time series data? +[Automatically upgrade to InfluxDB {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/automatic-upgrade/). + +#### Do you want to selectively migrate your time series data? +[Manually upgrade to InfluxDB {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade/). + +#### Are you using Docker? 
+[Upgrade to the 2.x Docker image](/influxdb/v2.5/upgrade/v1-to-v2/docker/).
+
+#### Are you using continuous queries (CQs)?
+After you upgrade (automatically, manually, or using Docker),
+[migrate your 1.x CQs to InfluxDB {{< current-version >}} tasks](/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/).
diff --git a/content/influxdb/v2.5/upgrade/v1-to-v2/automatic-upgrade.md b/content/influxdb/v2.5/upgrade/v1-to-v2/automatic-upgrade.md
new file mode 100644
index 000000000..8be8f8bce
--- /dev/null
+++ b/content/influxdb/v2.5/upgrade/v1-to-v2/automatic-upgrade.md
@@ -0,0 +1,280 @@
+---
+title: Automatically upgrade from InfluxDB 1.x to 2.5
+list_title: Automatically upgrade from 1.x to 2.5
+description: >
+  Use the `influxd upgrade` command to automatically upgrade from InfluxDB 1.x to InfluxDB 2.5.
+menu:
+  influxdb_2_5:
+    parent: InfluxDB 1.x to 2.5
+    name: Automatically upgrade
+weight: 10
+aliases:
+  - /influxdb/v2.5/reference/upgrading/influxd-upgrade-guide/
+related:
+  - /influxdb/v2.5/reference/cli/influxd/upgrade/
+  - /influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade/
+  - /influxdb/v2.5/upgrade/v1-to-v2/docker/
+---
+
+Use the `influxd upgrade` command to upgrade InfluxDB 1.x to InfluxDB {{< current-version >}}.
+The `upgrade` command provides an in-place upgrade from InfluxDB 1.x to InfluxDB {{< current-version >}}.
+
+Specifically, the upgrade process does the following:
+
+1. Reads the existing InfluxDB 1.x configuration file and generates an equivalent InfluxDB {{< current-version >}} configuration file at `~/.influxdbv2/config.toml` or at a custom path specified with the `--v2-config-path` flag.
+2. Upgrades metadata and storage engine paths to `~/.influxdbv2/meta` and `~/.influxdbv2/engine`, respectively (unless otherwise specified).
+3. Writes existing data and write ahead log (WAL) files into InfluxDB {{< current-version >}} [buckets](/influxdb/v2.5/reference/glossary/#bucket).
+4. Creates [database and retention policy (DBRP) mappings](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/) required to query data with InfluxQL.
+5. Reads existing metadata and migrates non-admin users, passwords, and permissions into a 1.x authorization-compatible store within `~/.influxdbv2/influxdb.bolt`.
+
+When starting InfluxDB {{< current-version >}} after running `influxd upgrade`, InfluxDB must build a new time series index (TSI).
+Depending on the volume of data present, this may take some time.
+
+## Important considerations before you begin
+
+Before upgrading to InfluxDB {{< current-version >}}, consider the following guidelines.
+Some or all might apply to your specific installation and use case.
+The sections below contain our recommendations for addressing possible gaps in the upgrade process.
+Consider whether you need to address any of the following before upgrading.
+
+- [Available operating system, container, and platform support](#available-operating-system-container-and-platform-support)
+- [Continuous queries](#continuous-queries)
+- [Supported protocols](#supported-protocols)
+- [Kapacitor](#kapacitor)
+- [User migration](#user-migration)
+- [Dashboards](#dashboards)
+- [Other data](#other-data)
+- [Secure by default](#secure-by-default)
+- [`inmem` indexing option](#in-memory-indexing-option)
+- [Interactive shell](#interactive-shell)
+
+### Available operating system, container, and platform support
+
+InfluxDB {{< current-version >}} is currently available for macOS, Linux, and Windows.
+
+{{% note %}}
+InfluxDB {{< current-version >}} requires 64-bit operating systems.
+{{% /note %}}
+
+### Continuous queries
+
+Continuous queries are replaced by **tasks** in InfluxDB {{< current-version >}}.
+By default, `influxd upgrade` writes all continuous queries to `~/continuous_queries.txt`.
+To convert continuous queries to InfluxDB tasks, see
+[Migrate continuous queries to tasks](/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/).
+
+### Supported protocols
+
+InfluxDB {{< current-version >}} doesn't directly support the alternate write protocols
+[supported in InfluxDB 1.x](/influxdb/v1.8/supported_protocols/)
+(CollectD, Graphite, OpenTSDB, Prometheus, UDP).
+Use [Telegraf](/{{< latest "telegraf" >}}/) to translate these protocols to line protocol.
+
+### Kapacitor
+
+You can continue to use Kapacitor with InfluxDB OSS {{< current-version >}} under the following scenarios:
+
+- Kapacitor Batch-style TICKscripts work with the 1.x read compatible API.
+  Existing Kapacitor user credentials should continue to work using the [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/).
+- InfluxDB {{< current-version >}} has no subscriptions API and does not support Kapacitor stream tasks.
+  To continue using stream tasks, write data directly to both InfluxDB and Kapacitor.
+  Use **Telegraf** and its [InfluxDB output plugin](/{{< latest "telegraf" >}}/plugins/#output-influxdb)
+  to write to Kapacitor and the [InfluxDB v2 output plugin](/{{< latest "telegraf" >}}/plugins/#output-influxdb_v2) to write to InfluxDB v2.
+
+##### Example Telegraf configuration
+```toml
+# Write to Kapacitor
+[[outputs.influxdb]]
+  urls = ["http://localhost:9092"]
+  database = "example-db"
+  retention_policy = "example-rp"
+
+# Write to InfluxDB {{< current-version >}}
+[[outputs.influxdb]]
+  urls = ["http://localhost:8086"]
+  database = "example-db"
+  retention_policy = "example-rp"
+  username = "v1-auth-username"
+  password = "v1-auth-password"
+```
+
+### User migration
+
+`influxd upgrade` migrates existing 1.x users and their permissions **except** the following users:
+
+- [1.x admin users](/{{< latest "influxdb" "v1" >}}/administration/authentication_and_authorization/#admin-users)
+- [1.x non-admin users](/{{< latest "influxdb" "v1" >}}/administration/authentication_and_authorization/#non-admin-users)
+  that have not been granted any privileges
+
+{{< expand-wrapper >}}
+{{% expand "Review 1.x user privileges" %}}
+**To review 1.x users with admin privileges**, run the following against your InfluxDB 1.x instance:

+```sql
+SHOW USERS
+```
+
+Users with `admin` set to `true` will **not** be migrated.
+
+**To review the specific privileges granted to each 1.x user**, run the following for each user in your InfluxDB 1.x instance:
+
+```sql
+SHOW GRANTS FOR "<username>"
+```
+
+If no grants appear, the user will **not** be migrated.
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+If using an admin user for visualization or Chronograf administrative functions,
+**create a new read-only user before upgrading**:
+
+##### Create a read-only 1.x user
+```sh
+> CREATE USER <username> WITH PASSWORD '<password>'
+> GRANT READ ON <database> TO "<username>"
+```
+
+InfluxDB {{< current-version >}} only grants admin privileges to the primary user set up during the InfluxDB {{< current-version >}} upgrade.
+This gives you the opportunity to reassess which users should be granted admin permissions when setting up InfluxDB {{< current-version >}}.
+
+### Dashboards
+
+You can continue to use your existing dashboards and visualization tools with InfluxDB {{< current-version >}} via the [1.x read compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/).
+The upgrade process creates [DBRP mappings](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/) to ensure existing users can execute InfluxQL queries with the appropriate permissions.
+
+However, if your dashboard tool is configured using a user with admin permissions,
+you will need to create a new read-only user with the appropriate database permissions *before* upgrading.
+This new username and password combination should be used within the data source configurations to continue to provide read-only access to the underlying data.
+
+Ensure your dashboards are all functioning before upgrading.
+
+### Other data
+
+The 1.x `_internal` database is not migrated with the `influxd upgrade` command.
+To collect, store, and monitor similar internal InfluxDB metrics,
+[create an InfluxDB {{< current-version >}} scraper](/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/create-a-scraper/)
+to scrape data from the `/metrics` endpoint and store them in a bucket.
+
+### Secure by default
+
+InfluxDB {{< current-version >}} requires authentication and does not support the InfluxDB 1.x `auth-enabled = false` configuration option.
+
+Before upgrading to {{< current-version >}}, [enable authentication in your InfluxDB 1.x instance](/influxdb/v1.8/administration/authentication_and_authorization/#set-up-authentication)
+and test your credentials to ensure your applications, agents, and visualization tools can connect to InfluxDB.
+
+If you upgrade with `auth-enabled = false`, the upgrade may appear complete,
+but client requests to InfluxDB {{< current-version >}} may be silently ignored (you won't see a notification the request was denied).
+
+### In-memory indexing option
+
+InfluxDB {{< current-version >}} doesn't support [in-memory (`inmem`) indexing](/influxdb/v1.8/administration/config/#in-memory-inmem-index-settings). The following InfluxDB 1.x configuration options associated with `inmem` indexing are ignored in the upgrade process:
+
+- `max-series-per-database`
+- `max-values-per-tag`
+
+### Interactive shell
+
+The InfluxDB {{< current-version >}} `influx` CLI includes an interactive **InfluxQL shell** for executing InfluxQL queries.
+To start an InfluxQL shell:
+
+1. Download and install the [`influx` CLI](/influxdb/v2.5/tools/influx-cli/).
+2. Set up your [`influx` CLI authentication credentials](/influxdb/v2.5/tools/influx-cli/#set-up-the-influx-cli).
+3. Run the `influx v1 shell` command.
+
+For more information, see:
+
+- [Use the InfluxQL shell](/influxdb/v2.5/tools/influxql-shell/)
+- [Query data with InfluxQL](/influxdb/v2.5/query-data/influxql/)
+
+To build an interactive shell to execute **Flux** queries,
+[compile and build a command line Flux REPL from source](https://github.com/influxdata/flux/blob/master/README.md#getting-started).
+
+## Perform the upgrade
+
+If you've considered the [guidance above](#important-considerations-before-you-begin)
+and are ready to proceed, follow these steps to upgrade your InfluxDB 1.x instance to InfluxDB {{< current-version >}}.
+
+1. [Download InfluxDB OSS {{< current-version >}}](https://portal.influxdata.com/downloads/).
+   Unpackage the InfluxDB binaries and place them in your `$PATH`.
+2. Stop your running InfluxDB 1.x instance.
+   Make a backup copy of all 1.x data before upgrading:
+
+   ```sh
+   cp -R .influxdb/ .influxdb_bak/
+   ```
+3. Use `influxd version` to ensure you are running InfluxDB {{< current-version >}} from the command line.
+   The `influxd upgrade` command is only available in InfluxDB {{< current-version >}}.
+4. 
If your 1.x configuration file is at the [default location](/influxdb/v1.8/administration/config/#using-the-configuration-file), run:
+   ```sh
+   influxd upgrade
+   ```
+   {{% note %}}
+#### Upgrade `.deb` packages
+When installed from a `.deb` package, InfluxDB 1.x and 2.x run under the `influxdb` user.
+If you've installed both versions from `.deb` packages, run the upgrade command
+as the `influxdb` user:
+
+```sh
+sudo -u influxdb influxd upgrade
+```
+   {{% /note %}}
+   If your 1.x configuration file is not at the default location, run:
+
+   ```sh
+   influxd upgrade --config-file <path-to-1.x-config-file>
+   ```
+
+   To store the upgraded {{< current-version >}} configuration file in a custom location, include the `--v2-config-path` flag:
+
+   ```sh
+   influxd upgrade --v2-config-path <custom-config-path>
+   ```
+
+
+5. Follow the prompts to set up a new InfluxDB {{< current-version >}} instance.
+
+   ```
+   Welcome to InfluxDB {{< current-version >}} upgrade!
+   Please type your primary username: <username>
+
+   Please type your password:
+
+   Please type your password again:
+
+   Please type your primary organization name: <organization>
+
+   Please type your primary bucket name: <bucket>
+
+   Please type your retention period in hours.
+   Or press ENTER for infinite:
+
+   You have entered:
+     Username:          <username>
+     Organization:      <organization>
+     Bucket:            <bucket>
+     Retention Period:  infinite
+   Confirm? (y/n): y
+   ```
+
+The output of the upgrade prints to standard output.
+It is also saved (for troubleshooting and debugging) to a file called `upgrade.log`
+located in the home directory of the user running `influxd upgrade`.
+
+## Post-upgrade
+
+### Verify 1.x users were migrated to {{< current-version >}}
+
+To verify 1.x users were successfully migrated to {{< current-version >}}, run
+[`influx v1 auth list`](/influxdb/v2.5/reference/cli/influx/v1/auth/list/).
+
+#### Add authorizations for external clients
+
+If your InfluxDB 1.x instance **did not have authentication enabled** and the
+`influx v1 auth list` command doesn't return any users, external clients connected to
+your 1.x instance will not be able to access InfluxDB {{< current-version >}}, which requires authentication.
+
+**For these external clients to work with InfluxDB {{< current-version >}}:**
+
+1. [Manually create a 1.x-compatible authorization](/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade/#create-a-1x-compatible-authorization).
+2. Update the client configuration to use the username and password associated
+   with your 1.x-compatible authorization.
diff --git a/content/influxdb/v2.5/upgrade/v1-to-v2/docker.md b/content/influxdb/v2.5/upgrade/v1-to-v2/docker.md
new file mode 100644
index 000000000..af5064e11
--- /dev/null
+++ b/content/influxdb/v2.5/upgrade/v1-to-v2/docker.md
@@ -0,0 +1,290 @@
+---
+title: Upgrade from InfluxDB 1.x to 2.5 with Docker
+list_title: Upgrade from 1.x to 2.5 with Docker
+description: >
+  Use the automated upgrade process built into the InfluxDB 2.x Docker image to
+  update InfluxDB 1.x Docker deployments to InfluxDB 2.x.
+menu:
+  influxdb_2_5:
+    parent: InfluxDB 1.x to 2.5
+    name: Upgrade with Docker
+weight: 101
+---
+
+Use the automated upgrade process built into the [InfluxDB 2.x Docker image](https://hub.docker.com/_/influxdb)
+to update InfluxDB 1.x Docker deployments to InfluxDB 2.x.
+
+- [Upgrade requirements](#upgrade-requirements)
+- [Minimal upgrade](#minimal-upgrade)
+- [Upgrade with a custom InfluxDB 1.x configuration file](#upgrade-with-a-custom-influxdb-1-x-configuration-file)
+- [Upgrade with custom paths](#upgrade-with-custom-paths)
+- [Use new InfluxDB tools](#use-new-influxdb-tools)
+
+{{% note %}}
+#### Export continuous queries before upgrading
+The automated upgrade process **does not** migrate InfluxDB 1.x continuous queries (CQs)
+to InfluxDB 2.x tasks (the 2.x equivalent). Export all of your CQs before upgrading to InfluxDB 2.x.
+For information about exporting and migrating CQs to tasks, see
+[Migrate continuous queries to tasks](/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/).
+{{% /note %}}
+
+## Upgrade requirements
+InfluxDB 2.x provides a 1.x compatibility API, but expects a different storage layout on disk.
+To account for these differences, the InfluxDB Docker image automatically migrates
+1.x data into 2.x-compatible data before booting the `influxd` server.
+
+- [InfluxDB 2.x initialization credentials](#influxdb-2x-initialization-credentials)
+- [File system mounts](#file-system-mounts)
+- [Upgrade initialization mode](#upgrade-initialization-mode)
+
+{{% note %}}
+To ensure InfluxDB reboots post-upgrade without overwriting migrated data,
+the upgrade won't run if an existing boltdb file is found at the
+[configured 2.x configuration path](#file-system-mounts).
+{{% /note %}}
+
+For more information about the automated InfluxDB upgrade process,
+see [Automatically upgrade from InfluxDB 1.x to {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/automatic-upgrade/).
+
+### InfluxDB 2.x initialization credentials
+The automated InfluxDB upgrade process bootstraps an initial admin user,
+[organization](/influxdb/v2.5/reference/glossary/#organization), and
+[bucket](/influxdb/v2.5/reference/glossary/#bucket) required by InfluxDB 2.x.
+Set the following [environment variables in your Docker container](https://docs.docker.com/search/?q=environment%20variables)
+to provide setup credentials:
+
+- `DOCKER_INFLUXDB_INIT_USERNAME`: Username to set for the admin user ({{< req >}}).
+- `DOCKER_INFLUXDB_INIT_PASSWORD`: Password to set for the admin user ({{< req >}}).
+- `DOCKER_INFLUXDB_INIT_ORG`: Name to set for the initial organization ({{< req >}}).
+- `DOCKER_INFLUXDB_INIT_BUCKET`: Name to set for the initial bucket ({{< req >}}).
+- `DOCKER_INFLUXDB_INIT_RETENTION`: Duration for the initial bucket's retention period.
+  If not set, the initial bucket will retain data forever.
+- `DOCKER_INFLUXDB_INIT_ADMIN_TOKEN`: API token to associate with the admin user.
+  If not set, InfluxDB automatically generates a token.
+
+### File system mounts
+The InfluxDB upgrade process requires extra volumes to be mounted into the 2.x container.
+Use **environment variables** and **Docker mounts** to specify and configure the
+appropriate mount paths for the following:
+
+- 1.x data on disk
+- Custom 1.x configuration file (if any)
+- 2.x data on disk (`/var/lib/influxdb2`)
+- 2.x configuration directory (`/etc/influxdb2`)
+
+The InfluxDB upgrade process searches for mounted 1.x data and configuration files
+in the following priority order:
+
+1. 1.x configuration file specified by the `DOCKER_INFLUXDB_INIT_UPGRADE_V1_CONFIG` environment variable
+2. 1.x data directory specified by the `DOCKER_INFLUXDB_INIT_UPGRADE_V1_DIR` environment variable
+3. 1.x configuration file mounted at `/etc/influxdb/influxdb.conf`
+4. 
1.x data directory mounted at `/var/lib/influxdb` + +{{% note %}} +#### Avoid data loss +By default, the automated upgrade process generates both data and configuration files +under `/var/lib/influxdb2` and `/etc/influxdb2`. +We recommend mounting volumes at both paths to avoid losing data. +{{% /note %}} + +### Upgrade initialization mode +Set the `DOCKER_INFLUXDB_INIT_MODE` environment variable to `upgrade`. + +## Minimal upgrade +If you're currently running a minimal InfluxDB 1.x deployment similar to: + +```sh +docker run -p 8086:8086 \ + -v influxdb:/var/lib/influxdb \ + influxdb:1.8 +``` + +**To upgrade this minimal deployment to InfluxDB 2.x:** + +1. Stop the running InfluxDB 1.x container. +2. Start the InfluxDB container with the following: + + - Volume mount for the InfluxDB 1.x data directory + - Volume mount for the InfluxDB 2.x data directory + - InfluxDB [initialization mode](#upgrade-initialization-mode) environment variable + - InfluxDB [initialization credential](#influxdb-2x-initialization-credentials) environment variables + - `influxdb:{{< current-version >}}` Docker image + + ```sh + docker run -p 8086:8086 \ + -v influxdb:/var/lib/influxdb \ + -v influxdb2:/var/lib/influxdb2 \ + -e DOCKER_INFLUXDB_INIT_MODE=upgrade \ + -e DOCKER_INFLUXDB_INIT_USERNAME=my-user \ + -e DOCKER_INFLUXDB_INIT_PASSWORD=my-password \ + -e DOCKER_INFLUXDB_INIT_ORG=my-org \ + -e DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \ + influxdb:{{< current-version >}} + ``` + +## Upgrade with a custom InfluxDB 1.x configuration file +If you're currently running an InfluxDB 1.x deployment with a custom configuration +file similar to: + +```sh +docker run -p 8086:8086 \ + -v influxdb:/var/lib/influxdb \ + -v $PWD/influxdb.conf:/etc/influxdb/influxdb.conf:ro \ + influxdb:1.8 +``` + +**To upgrade an InfluxDB 1.x deployment with a custom configuration file to InfluxDB 2.x:** + +1. Stop the running InfluxDB 1.x container. +2. Start the InfluxDB container with the following: + + - Volume mount for the InfluxDB 1.x data directory + - Volume mount for the InfluxDB 1.x configuration file + - Volume mount for the InfluxDB 2.x data directory (`/var/lib/influxdb2`) + - Volume mount for the InfluxDB 2.x configuration directory (`/etc/influxdb2`) + - InfluxDB [initialization mode](#upgrade-initialization-mode) environment variable + - InfluxDB [initialization credential](#influxdb-2x-initialization-credentials) environment variables + - `influxdb:{{< current-version >}}` Docker image + +```sh +docker run -p 8086:8086 \ + -v influxdb:/var/lib/influxdb \ + -v $PWD/influxdb.conf:/etc/influxdb/influxdb.conf:ro \ + -v influxdb2:/var/lib/influxdb2 \ + -v influxdb2:/etc/influxdb2 \ + -e DOCKER_INFLUXDB_INIT_MODE=upgrade \ + -e DOCKER_INFLUXDB_INIT_USERNAME=my-user \ + -e DOCKER_INFLUXDB_INIT_PASSWORD=my-password \ + -e DOCKER_INFLUXDB_INIT_ORG=my-org \ + -e DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \ + influxdb:{{< current-version >}} +``` + +## Upgrade with custom paths +If you're currently running an InfluxDB 1.x deployment with the data directory and +configuration file mounted at custom paths similar to: + +```sh +docker run -p 8086:8086 \ + -v influxdb:/root/influxdb/data \ + -v $PWD/influxdb.conf:/root/influxdb/influxdb.conf:ro \ + influxdb:1.8 -config /root/influxdb/influxdb.conf +``` + +**To upgrade an InfluxDB 1.x deployment with custom paths to InfluxDB 2.x:** + +1. Stop the running InfluxDB 1.x container. +2. Decide to either **keep using custom paths** or **use InfluxDB 2.x default paths**. 
+ +{{< tabs-wrapper >}} +{{% tabs %}} +[Keep using custom paths](#) +[Use InfluxDB 2.x defaults](#) +{{% /tabs %}} + +{{% tab-content %}} + +To retain your custom InfluxDB 1.x paths, start the InfluxDB container with the following: + +- Volume mount for the InfluxDB 1.x data directory +- Volume mount for the InfluxDB 1.x configuration file +- Volume mount for the InfluxDB 2.x data directory +- Volume mount for the InfluxDB 2.x configuration directory +- InfluxDB [initialization mode](#upgrade-initialization-mode) environment variable +- InfluxDB [initialization credential](#influxdb-2x-initialization-credentials) environment variables +- InfluxDB 2.x [v1 configuration file path](#file-system-mounts) environment variable: + - `DOCKER_INFLUXDB_INIT_UPGRADE_V1_CONFIG` +- InfluxDB 1.x custom path environment variables: + - `INFLUXD_CONFIG_PATH` + - `INFLUXD_BOLT_PATH` + - `INFLUXD_ENGINE_PATH` +- `influxdb:{{< current-version >}}` Docker image + +```sh +docker run -p 8086:8086 \ + -v influxdb:/root/influxdb/data \ + -v $PWD/influxdb.conf:/root/influxdb/influxdb.conf:ro \ + -v influxdb2:/root/influxdb2/data \ + -v influxdb2:/root/influxdb2 \ + -e DOCKER_INFLUXDB_INIT_MODE=upgrade \ + -e DOCKER_INFLUXDB_INIT_USERNAME=my-user \ + -e DOCKER_INFLUXDB_INIT_PASSWORD=my-password \ + -e DOCKER_INFLUXDB_INIT_ORG=my-org \ + -e DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \ + -e DOCKER_INFLUXDB_INIT_UPGRADE_V1_CONFIG=/root/influxdb/influxdb.conf \ + -e INFLUXD_CONFIG_PATH=/root/influxdb2/config.toml \ + -e INFLUXD_BOLT_PATH=/root/influxdb2/influxdb.bolt \ + -e INFLUXD_ENGINE_PATH=/root/influxdb2/engine \ + influxdb:{{< current-version >}} +``` +{{% /tab-content %}} + + +{{% tab-content %}} +To use default InfluxDB 2.x paths, start the InfluxDB container with the following: + +- Volume mount for the InfluxDB 1.x data directory +- Volume mount for the InfluxDB 1.x configuration file +- Volume mount for the InfluxDB 2.x data directory (`/var/lib/influxdb2`) +- Volume mount for the InfluxDB 2.x configuration directory (`/etc/influxdb2`) +- InfluxDB [initialization mode](#upgrade-initialization-mode) environment variable +- InfluxDB [initialization credential](#influxdb-2x-initialization-credentials) environment variables +- InfluxDB 2.x [v1 configuration file path](#file-system-mounts) environment variable: + - `DOCKER_INFLUXDB_INIT_UPGRADE_V1_CONFIG` +- `influxdb:{{< current-version >}}` Docker image + +```sh +docker run -p 8086:8086 \ + -v influxdb:/root/influxdb/data \ + -v $PWD/influxdb.conf:/root/influxdb/influxdb.conf:ro \ + -v influxdb2:/var/lib/influxdb2 \ + -v influxdb2:/etc/influxdb2 \ + -e DOCKER_INFLUXDB_INIT_MODE=upgrade \ + -e DOCKER_INFLUXDB_INIT_USERNAME=my-user \ + -e DOCKER_INFLUXDB_INIT_PASSWORD=my-password \ + -e DOCKER_INFLUXDB_INIT_ORG=my-org \ + -e DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \ + -e DOCKER_INFLUXDB_INIT_UPGRADE_V1_CONFIG=/root/influxdb/influxdb.conf \ + influxdb:{{< current-version >}} +``` +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +## Use new InfluxDB tools + +Once upgraded, use InfluxDB {{< current-version >}} tools to work with your time series data. + +- [Migrate continuous queries to tasks](#migrate-continuous-queries-to-tasks) +- [Use the interactive InfluxQL shell](#use-the-interactive-influxql-shell) + +### Migrate continuous queries to tasks + +InfluxDB {{< current-version >}} replaces continuous queries with **tasks**. +By default, the upgrade process writes all continuous queries to `~/continuous_queries.txt`. 
+To convert continuous queries to InfluxDB tasks, see +[Migrate continuous queries to tasks](/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/). + +### Use the interactive InfluxQL shell + +The InfluxDB {{< current-version >}} `influx` CLI includes an interactive **InfluxQL shell** for executing InfluxQL queries. +The InfluxDB {{< current-version >}} Docker image includes the `influx` CLI. + +To start an InfluxQL shell: + +1. Start a bash session in your InfluxDB container: + + ```sh + docker exec -it influxdb /bin/bash + ``` + +2. Set up your [`influx` CLI authentication credentials](/influxdb/v2.5/tools/influx-cli/#set-up-the-influx-cli). +3. Run the `influx v1 shell` command. + +For more information, see: + +- [Use the InfluxQL shell](/influxdb/v2.5/tools/influxql-shell/) +- [Query data with InfluxQL](/influxdb/v2.5/query-data/influxql/) + +To build an interactive shell to execute **Flux** queries, +[compile and build a command line Flux REPL from source](https://github.com/influxdata/flux/blob/master/README.md#getting-started). diff --git a/content/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade.md b/content/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade.md new file mode 100644 index 000000000..2c6279fa9 --- /dev/null +++ b/content/influxdb/v2.5/upgrade/v1-to-v2/manual-upgrade.md @@ -0,0 +1,346 @@ +--- +title: Manually upgrade from InfluxDB 1.x to 2.5 +list_title: Manually upgrade from 1.x to 2.5 +description: > + To manually upgrade from InfluxDB 1.x to InfluxDB 2.5, migrate data, create + 1.x-compatible authorizations, and create database and retention policy + (DBRP) mappings. +menu: + influxdb_2_5: + parent: InfluxDB 1.x to 2.5 + name: Manually upgrade +weight: 11 +related: + - /influxdb/v2.5/upgrade/v1-to-v2/ + - /influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/ +--- + +To manually upgrade from InfluxDB 1.x to InfluxDB {{< current-version >}}: + +1. [Install InfluxDB {{< current-version >}}](#install-influxdb-21) +2. [Migrate custom configuration settings](#migrate-custom-configuration-settings) +3. [Create DBRP mappings](#create-dbrp-mappings) +4. [Create authorizations](#create-authorizations) +5. [Migrate time series data](#migrate-time-series-data) +6. [Migrate continuous queries](#migrate-continuous-queries) +7. [Query data with InfluxQL](#query-data-with-influxql) + +## Install InfluxDB {{< current-version >}} +[Download, install, and set up InfluxDB {{< current-version >}}](/influxdb/v2.5/get-started/). + +{{% note %}} +#### Required 2.x credentials +All InfluxDB {{< current-version >}} `influx` CLI examples below assume the required **host**, +**organization**, and **API token** credentials are provided by your +[`influx` CLI configuration](/influxdb/v2.5/reference/cli/influx/#provide-required-authentication-credentials). +{{% /note %}} + +## Migrate custom configuration settings +If you're using custom configuration settings in your InfluxDB 1.x instance, do the following: + +1. 
Compare 1.x and {{< current-version >}} configuration settings: + + {{< expand-wrapper >}} + {{% expand "View configuration option parity" %}} + +| 1.x configuration option | {{< current-version >}} configuration option | +| :--------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------- | +| **[data]** | | +| dir | [engine-path](/influxdb/v2.5/reference/config-options/#engine-path) | +| wal-dir | [engine-path](/influxdb/v2.5/reference/config-options/#engine-path) | +| wal-fsync-delay | [storage-wal-fsync-delay](/influxdb/v2.5/reference/config-options/#storage-wal-fsync-delay) | +| index-version | | +| trace-logging-enabled | | +| query-log-enabled | | +| strict-error-handling | | +| validate-keys | [storage-validate-keys](/influxdb/v2.5/reference/config-options/#storage-validate-keys) | +| cache-max-memory-size | [storage-cache-max-memory-size](/influxdb/v2.5/reference/config-options/#storage-cache-max-memory-size) | +| cache-snapshot-memory-size | [storage-cache-snapshot-memory-size](/influxdb/v2.5/reference/config-options/#storage-cache-snapshot-memory-size) | +| cache-snapshot-write-cold-duration | [storage-cache-snapshot-write-cold-duration](/influxdb/v2.5/reference/config-options/#storage-cache-snapshot-write-cold-duration) | +| compact-full-write-cold-duration | [storage-compact-full-write-cold-duration](/influxdb/v2.5/reference/config-options/#storage-compact-full-write-cold-duration) | +| max-concurrent-compactions | [storage-max-concurrent-compactions](/influxdb/v2.5/reference/config-options/#storage-max-concurrent-compactions) | +| compact-throughput | | +| compact-throughput-burst | [storage-compact-throughput-burst](/influxdb/v2.5/reference/config-options/#storage-compact-throughput-burst) | +| tsm-use-madv-willneed | [storage-tsm-use-madv-willneed](/influxdb/v2.5/reference/config-options/#storage-tsm-use-madv-willneed) | +| max-series-per-database | | +| max-values-per-tag | | +| max-index-log-file-size | [storage-max-index-log-file-size](/influxdb/v2.5/reference/config-options/#storage-max-index-log-file-size) | +| series-id-set-cache-size | [storage-series-id-set-cache-size](/influxdb/v2.5/reference/config-options/#storage-series-id-set-cache-size) | +| | | +| **[retention]** | | +| check-interval | [storage-retention-check-interval](/influxdb/v2.5/reference/config-options/#storage-retention-check-interval) | +| | | +| **[shard-precreation]** | | +| check-interval | [storage-shard-precreator-check-interval](/influxdb/v2.5/reference/config-options/#storage-shard-precreator-check-interval) | +| advance-period | [storage-shard-precreator-advance-period](/influxdb/v2.5/reference/config-options/#storage-shard-precreator-advance-period) | +| | | +| **[http]** | | +| flux-enabled | | +| flux-log-enabled | [flux-log-enabled](/influxdb/v2.5/reference/config-options/#flux-log-enabled) | +| bind-address | [http-bind-address](/influxdb/v2.5/reference/config-options/#http-bind-address) | +| auth-enabled | | +| realm | | +| log-enabled | | +| suppress-write-log | | +| access-log-path | | +| access-log-status-filters | | +| write-tracing | | +| pprof-enabled | [pprof-disabled](/influxdb/v2.5/reference/config-options/#pprof-disabled) | +| pprof-auth-enabled | | +| debug-pprof-enabled | | +| ping-auth-enabled | | +| https-enabled | | +| https-certificate | [tls-cert](/influxdb/v2.5/reference/config-options/#tls-cert) | +| https-private-key | 
[tls-key](/influxdb/v2.5/reference/config-options/#tls-key) | +| shared-secret | | +| max-row-limit | | +| max-connection-limit | | +| unix-socket-enabled | | +| bind-socket | | +| max-body-size | | +| max-concurrent-write-limit | | +| max-enqueued-write-limit | | +| enqueued-write-timeout | [http-write-timeout](/influxdb/v2.5/reference/config-options/#http-write-timeout) | +| | | +| **[logging]** | | +| format | | +| level | [log-level](/influxdb/v2.5/reference/config-options/#log-level) | +| suppress-logo | | +| | | +| **[tls]** | | +| ciphers | [tls-strict-ciphers](/influxdb/v2.5/reference/config-options/#tls-strict-ciphers) | +| min-version | [tls-min-version](/influxdb/v2.5/reference/config-options/#tls-min-version) | +| max-version | | + +{{% note %}} +#### 1.x configuration groups not in {{< current-version >}} +The following 1.x configuration groups **do not** apply to InfluxDB {{< current-version >}}: + +- meta +- coordinator +- monitor +- subscriber +- graphite +- collectd +- opentsdb +- udp +- continuous_queries +{{% /note %}} + {{% /expand %}} + {{< /expand-wrapper >}} + +2. Apply your 1.x custom settings to the comparable InfluxDB {{< current-version >}} settings using + `influxd` flags, environment variables, or a {{< current-version >}} configuration file. + For more information about configuring InfluxDB {{< current-version >}}, see + [Configuration options](/influxdb/v2.5/reference/config-options/). +3. **Restart `influxd`**. + +## Create DBRP mappings +InfluxDB database and retention policy (DBRP) mappings associate database and +retention policy combinations with InfluxDB {{< current-version >}} [buckets](/influxdb/v2.5/reference/glossary/#bucket). +These mappings allow InfluxDB 1.x clients to successfully query and write to +InfluxDB {{< current-version >}} buckets while using the 1.x DBRP convention. + +_For more information about DBRP mapping, see +[Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)._ + +**To map a DBRP combination to an InfluxDB {{< current-version >}} bucket:** + +1. **Create a bucket** + [Create an InfluxDB {{< current-version >}} bucket](/influxdb/v2.5/organizations/buckets/create-bucket/). + We recommend creating a bucket for each unique 1.x database and retention + policy combination using the following naming convention: + + ```sh + # Naming convention + db-name/rp-name + + # Example + telegraf/autogen + ``` + +2. **Create a DBRP mapping** + Use the [`influx v1 dbrp create` command](/influxdb/v2.5/reference/cli/influx/v1/dbrp/create/) + to create a DBRP mapping. 
+ Provide the following: + + - database name + - retention policy name _(not retention period)_ + - [bucket ID](/influxdb/v2.5/organizations/buckets/view-buckets/) + - _(optional)_ `--default` flag if you want the retention policy to be the default retention + policy for the specified database + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} +[DB with one RP](#) +[DB with multiple RPs](#) + {{% /code-tabs %}} + {{% code-tab-content %}} +```sh +influx v1 dbrp create \ + --db example-db \ + --rp example-rp \ + --bucket-id 00xX00o0X001 \ + --default +``` + {{% /code-tab-content %}} + {{% code-tab-content %}} +```sh +# Create telegraf/autogen DBRP mapping with autogen +# as the default RP for the telegraf DB + +influx v1 dbrp create \ + --db telegraf \ + --rp autogen \ + --bucket-id 00xX00o0X001 \ + --default + +# Create telegraf/downsampled-daily DBRP mapping that +# writes to a different bucket + +influx v1 dbrp create \ + --db telegraf \ + --rp downsampled-daily \ + --bucket-id 00xX00o0X002 +``` + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +3. **Confirm the DBRP mapping was created** + Use the [`influx v1 dbrp list`](/influxdb/v2.5/reference/cli/influx/v1/dbrp/list/) to list existing DBRP mappings. + + ```sh + influx v1 dbrp list + ``` + +For information about managing DBRP mappings, see the +[`influx v1 dbrp` command documentation](/influxdb/v2.5/reference/cli/influx/v1/dbrp/). + +## Create authorizations +InfluxDB {{< current-version >}} requires authentication and provides two authentication methods: + +- [Token authentication](#token-authentication) +- [1.x compatible authorizations](#1x-compatible-authorizations) + +### Token authentication +Use [InfluxDB {{< current-version >}} token authentication](/influxdb/v2.5/security/tokens/) to +authenticate requests to InfluxDB {{< current-version >}}. + +##### Recommended if: +- Your 1.x instance **does not have authentication enabled**. + +{{% note %}} +#### Use tokens with basic authentication +To use tokens with InfluxDB clients that require an InfluxDB username and password, +provide an arbitrary user name and pass the token as the password. +{{% /note %}} + +### 1.x-compatible authorizations +InfluxDB {{< current-version >}} provides a [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/) +that lets you authenticate using a username and password as in InfluxDB 1.x. +If authentication is enabled in your InfluxDB 1.x instance, +[create a 1.x-compatible authorization](#create-a-1x-compatible-authorization) +with the same username and password as your InfluxDB 1.x instance to allow +external clients to connect to your InfluxDB {{< current-version >}} instance without any change. + +##### Recommended if: +- Your 1.x instance **has authentication enabled**. +- You're using **InfluxDB 1.x clients or client libraries** configured with + InfluxDB usernames and passwords. + +{{% note %}} +1.x compatibility authorizations are separate from credentials used to log +into the InfluxDB {{< current-version >}} user interface (UI). +{{% /note %}} + +#### Create a 1.x-compatible authorization +Use the InfluxDB {{< current-version >}} [`influx v1 auth create` command](/influxdb/v2.5/reference/cli/influx/v1/auth/create/) +to create a 1.x-compatible authorization that grants read/write permissions to specific {{< current-version >}} buckets. 
+
+Provide the following:
+
+- list of [bucket IDs](/influxdb/v2.5/organizations/buckets/view-buckets/) to
+  grant read or write permissions to
+- new v1 auth username
+- new v1 auth password _(when prompted)_
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Single bucket](#)
+[Multiple buckets](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```sh
+influx v1 auth create \
+  --read-bucket 00xX00o0X001 \
+  --write-bucket 00xX00o0X001 \
+  --username example-user
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```sh
+influx v1 auth create \
+  --read-bucket 00xX00o0X001 \
+  --read-bucket 00xX00o0X002 \
+  --write-bucket 00xX00o0X001 \
+  --write-bucket 00xX00o0X002 \
+  --username example-user
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+For information about managing 1.x compatible authorizations, see the
+[`influx v1 auth` command documentation](/influxdb/v2.5/reference/cli/influx/v1/auth/).
+
+## Migrate time series data
+To migrate time series data from your InfluxDB 1.x instance to InfluxDB {{< current-version >}}:
+
+1. Use the **InfluxDB 1.x** [`influx_inspect export` command](/{{< latest "influxdb" "v1" >}}/tools/influx_inspect/#export)
+   to export time series data as line protocol.
+   Include the `-lponly` flag to exclude comments and the data definition
+   language (DDL) from the output file.
+
+   _We recommend exporting each DBRP combination separately to easily write data
+   to a corresponding InfluxDB {{< current-version >}} bucket._
+
+   ```sh
+   # Syntax
+   influx_inspect export \
+     -database <database-name> \
+     -retention <retention-policy-name> \
+     -out <output-file-path> \
+     -lponly
+
+   # Example
+   influx_inspect export \
+     -database example-db \
+     -retention example-rp \
+     -out /path/to/example-db_example-rp.lp \
+     -lponly
+   ```
+
+2. Use the **InfluxDB {{< current-version >}}** [`influx write` command](/influxdb/v2.5/reference/cli/influx/write/)
+   to write the exported line protocol to InfluxDB {{< current-version >}}.
+
+   ```sh
+   # Syntax
+   influx write \
+     --bucket <bucket-name> \
+     --file <path-to-line-protocol-file>
+
+   # Example
+   influx write \
+     --bucket example-db/example-rp \
+     --file /path/to/example-db_example-rp.lp
+   ```
+
+3. Repeat steps 1-2 for each bucket.
+
+## Migrate continuous queries
+For information about migrating InfluxDB 1.x continuous queries to InfluxDB {{< current-version >}} tasks,
+see [Migrate continuous queries to tasks](/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs/).
+
+## Query data with InfluxQL
+
+InfluxDB {{< current-version >}} supports querying data with both Flux and InfluxQL.
+For more information about querying InfluxDB {{< current-version >}} with InfluxQL,
+see [Query data with InfluxQL](/influxdb/v2.5/query-data/influxql/).
diff --git a/content/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs.md b/content/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs.md
new file mode 100644
index 000000000..f1e002fd2
--- /dev/null
+++ b/content/influxdb/v2.5/upgrade/v1-to-v2/migrate-cqs.md
@@ -0,0 +1,362 @@
+---
+title: Migrate continuous queries to tasks
+description: >
+  InfluxDB OSS 2.x replaces 1.x continuous queries (CQs) with **InfluxDB tasks**.
+  To migrate continuous queries to InfluxDB 2.x, convert InfluxDB 1.x CQs into Flux and create new
+  InfluxDB 2.x tasks.
+menu: + influxdb_2_5: + parent: InfluxDB 1.x to 2.5 + name: Migrate CQs +weight: 102 +related: + - /influxdb/v2.5/query-data/get-started/ + - /influxdb/v2.5/query-data/flux/ + - /influxdb/v2.5/process-data/ + - /influxdb/v2.5/process-data/common-tasks/ + - /influxdb/v2.5/reference/flux/flux-vs-influxql/ +--- + +InfluxDB OSS {{< current-version >}} replaces 1.x continuous queries (CQs) with **InfluxDB tasks**. +To migrate continuous queries to InfluxDB {{< current-version >}} tasks, do the following: + +1. [Output all InfluxDB 1.x continuous queries](#output-all-influxdb-1x-continuous-queries) +2. [Convert continuous queries to Flux queries](#convert-continuous-queries-to-flux-queries) +3. [Create new InfluxDB tasks](#create-new-influxdb-tasks) + +## Output all InfluxDB 1.x continuous queries + +If using the `influxd upgrade` command, by default, all continuous queries are +output to `~/continuous_queries.txt` during the upgrade process. +To customize the destination path of the continuous queries file, +use the `--continuous-query-export-path` flag with the `influxd upgrade` command. + +```sh +influxd upgrade --continuous-query-export-path /path/to/continuous_queries.txt +``` + +**To manually output continuous queries:** + +1. Use the **InfluxDB 1.x `influx` interactive shell** to run `SHOW CONTINUOUS QUERIES`: + + {{< keep-url >}} + ```sh + $ influx + Connected to http://localhost:8086 version {{< latest-patch version="1.8" >}} + InfluxDB shell version: {{< latest-patch version="1.8" >}} + > SHOW CONTINUOUS QUERIES + ``` + +2. Copy and save the displayed continuous queries. + +## Convert continuous queries to Flux queries + +To migrate InfluxDB 1.x continuous queries to InfluxDB {{< current-version >}} tasks, convert the InfluxQL query syntax to Flux. +The majority of continuous queries are simple downsampling queries and can be converted quickly +using the [`aggregateWindow()` function](/{{< latest "flux" >}}/stdlib/universe/aggregatewindow/). +For example: + +##### Example continuous query +```sql +CREATE CONTINUOUS QUERY "downsample-daily" ON "my-db" +BEGIN + SELECT mean("example-field") + INTO "my-db"."example-rp"."average-example-measurement" + FROM "example-measurement" + GROUP BY time(1h) +END +``` + +##### Equivalent Flux task +```js +option task = {name: "downsample-daily", every: 1d} + +from(bucket: "my-db/") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "example-field") + |> aggregateWindow(every: 1h, fn: mean) + |> set(key: "_measurement", value: "average-example-measurement") + |> to(org: "example-org", bucket: "my-db/example-rp") +``` + +### Convert InfluxQL continuous queries to Flux +Review the following statements and clauses to see how to convert your CQs to Flux: + +- [ON clause](#on-clause) +- [SELECT statement](#select-statement) +- [INTO clause](#into-clause) +- [FROM clause](#from-clause) +- [AS clause](#as-clause) +- [WHERE clause](#where-clause) +- [GROUP BY clause](#group-by-clause) +- [RESAMPLE clause](#resample-clause) + +#### ON clause +The `ON` clause defines the database to query. +In InfluxDB OSS {{< current-version >}}, database and retention policy combinations are mapped to specific buckets +(for more information, see [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)). 
+ +Use the [`from()` function](/{{< latest "flux" >}}/stdlib/universe/from) +to specify the bucket to query: + +###### InfluxQL +```sql +CREATE CONTINUOUS QUERY "downsample-daily" ON "my-db" +-- ... +``` + +###### Flux +```js +from(bucket: "my-db/") +// ... +``` + +#### SELECT statement +The `SELECT` statement queries data by field, tag, and time from a specific measurement. +`SELECT` statements can take many different forms and converting them to Flux depends +on your use case. For information about Flux and InfluxQL function parity, see +[Flux vs InfluxQL](/influxdb/v2.5/reference/syntax/flux/flux-vs-influxql/#influxql-and-flux-parity). +See [other resources available to help](#other-helpful-resources). + +#### INTO clause +The `INTO` clause defines the measurement to write results to. +`INTO` also supports fully-qualified measurements that include the database and retention policy. +In InfluxDB OSS {{< current-version >}}, database and retention policy combinations are mapped to specific buckets +(for more information, see [Database and retention policy mapping](/influxdb/v2.5/reference/api/influxdb-1x/dbrp/)). + +To write to a measurement different than the measurement queried, use +[`set()`](/{{< latest "flux" >}}/stdlib/universe/set/) or +[`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) +to change the measurement name. +Use the `to()` function to specify the bucket to write results to. + +###### InfluxQL +```sql +-- ... +INTO "example-db"."example-rp"."example-measurement" +-- ... +``` + +###### Flux +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[set()](#) +[map()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +// ... + |> set(key: "_measurement", value: "example-measurement") + |> to(bucket: "example-db/example-rp") +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +// ... + |> map(fn: (r) => ({ r with _measurement: "example-measurement"})) + |> to(bucket: "example-db/example-rp") +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +##### Write pivoted data to InfluxDB +InfluxDB 1.x query results include a column for each field. +InfluxDB {{< current-version >}} does not do this by default, but it is possible with +[`pivot()`](/{{< latest "flux" >}}/stdlib/universe/pivot) +or [`schema.fieldsAsCols()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/fieldsascols/). + +If you use `to()` to write _pivoted data_ back to InfluxDB {{< current-version >}}, each field column is stored as a tag. +To write pivoted fields back to InfluxDB as fields, import the `experimental` package +and use the [`experimental.to()` function](/{{< latest "flux" >}}/stdlib/experimental/to/). + +###### InfluxQL +```sql +CREATE CONTINUOUS QUERY "downsample-daily" ON "my-db" +BEGIN + SELECT mean("example-field-1"), mean("example-field-2") + INTO "example-db"."example-rp"."example-measurement" + FROM "example-measurement" + GROUP BY time(1h) +END +``` + +###### Flux +```js +// ... + +from(bucket: "my-db/") + |> range(start: -task.every) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> filter(fn: (r) => r._field == "example-field-1" or r._field == "example-field-2") + |> aggregateWindow(every: task.every, fn: mean) + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + |> experimental.to(bucket: "example-db/example-rp") +``` + +#### FROM clause +The from clause defines the measurement to query. +Use the [`filter()` function](/{{< latest "flux" >}}/stdlib/universe/filter/) +to specify the measurement to query. 
+ +###### InfluxQL +```sql +-- ... +FROM "example-measurement" +-- ... +``` + +###### Flux +```js +// ... + |> filter(fn: (r) => r._measurement == "example-measurement") +``` + +#### AS clause +The `AS` clause changes the name of the field when writing data back to InfluxDB. +Use [`set()`](/{{< latest "flux" >}}/stdlib/universe/set/) +or [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) +to change the field name. + +###### InfluxQL +```sql +-- ... +AS newfield +-- ... +``` + +###### Flux +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[set()](#) +[map()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +// ... + |> set(key: "_field", value: "newfield") +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +// ... + |> map(fn: (r) => ({ r with _field: "newfield"})) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +#### WHERE clause +The `WHERE` clause uses predicate logic to filter results based on fields, tags, or timestamps. +Use the [`filter()` function](/{{< latest "flux" >}}/stdlib/universe/filter/) +and Flux [comparison operators](/{{< latest "flux" >}}/spec/operators/#comparison-operators) +to filter results based on fields and tags. +Use the [`range()` function](/{{< latest "flux" >}}/stdlib/universe/range/) to filter results based on timestamps. + +###### InfluxQL +```sql +-- ... +WHERE "example-tag" = "foo" AND time > now() - 7d +``` + +###### Flux +```js +// ... + |> range(start: -7d) + |> filter(fn: (r) => r["example-tag"] == "foo") +``` + +#### GROUP BY clause +The InfluxQL `GROUP BY` clause groups data by specific tags or by time (typically to calculate an aggregate value for windows of time). + +##### Group by tags +Use the [`group()` function](/{{< latest "flux" >}}/stdlib/universe/group/) +to modify the [group key](/{{< latest "flux" >}}/get-started/data-model/#group-key) and change how data is grouped. + +###### InfluxQL +```sql +-- ... +GROUP BY "location" +``` + +###### Flux +```js +// ... + |> group(columns: ["location"]) +``` + +##### Group by time +Use the [`aggregateWindow()` function](//{{< latest "flux" >}}/stdlib/universe/aggregatewindow/) +to group data into time windows and perform an aggregation on each window. +In CQs, the interval specified in the `GROUP BY time()` clause determines the CQ execution interval. +Use the `GROUP BY time()` interval to set the `every` task option. + +###### InfluxQL +```sql +-- ... +SELECT MEAN("example-field") +FROM "example-measurement" +GROUP BY time(1h) +``` + +###### Flux +```js +option task = {name: "task-name", every: 1h} + +// ... + |> filter(fn: (r) => r._measurement == "example-measurement" and r._field == "example-field") + |> aggregateWindow(every: task.every, fn: mean) +``` + +#### RESAMPLE clause + +The CQ `RESAMPLE` clause uses data from the last specified duration to calculate a new aggregate point. +The `EVERY` interval in `RESAMPLE` defines how often the CQ runs. +The `FOR` interval defines the total time range queried by the CQ. + +To accomplish this same functionality in a Flux task, set the `start` parameter +in the `range()` function to the negative `FOR` duration. +Define the task execution interval in the `task` options. 
+For example: + +###### InfluxQL +```sql +CREATE CONTINUOUS QUERY "resample-example" ON "my-db" +RESAMPLE EVERY 1m FOR 30m +BEGIN + SELECT exponential_moving_average(mean("example-field"), 30) + INTO "resample-average-example-measurement" + FROM "example-measurement" + WHERE region = 'example-region' + GROUP BY time(1m) +END +``` + +###### Flux +```js +option task = {name: "resample-example", every: 1m} + +from(bucket: "my-db/") + |> range(start: -30m) + |> filter(fn: (r) => r._measurement == "example-measurement" and r._field == "example-field" and r.region == "example-region") + |> aggregateWindow(every: 1m, fn: mean) + |> exponentialMovingAverage(n: 30) + |> set(key: "_measurement", value: "resample-average-example-measurement") + |> to(bucket: "my-db/") +``` + +## Create new InfluxDB tasks +After converting your continuous query to Flux, use the Flux query to +[create a new task](/influxdb/v2.5/process-data/manage-tasks/create-task/). + +## Other helpful resources +The following resources are available and may be helpful when converting +continuous queries to Flux tasks. + +##### Documentation +- [Get started with Flux](/{{< latest "flux" >}}/get-started/) +- [Query data with Flux](/influxdb/v2.5/query-data/flux/) +- [Common tasks](/influxdb/v2.5/process-data/common-tasks/#downsample-data-with-influxdb) + +##### Community +- Post in the [InfluxData Community](https://community.influxdata.com/) +- Ask in the [InfluxDB Community Slack](https://influxdata.com/slack) diff --git a/content/influxdb/v2.5/upgrade/v2-to-v2.md b/content/influxdb/v2.5/upgrade/v2-to-v2.md new file mode 100644 index 000000000..4bdd6b324 --- /dev/null +++ b/content/influxdb/v2.5/upgrade/v2-to-v2.md @@ -0,0 +1,81 @@ +--- +title: Upgrade from InfluxDB 2.x to InfluxDB 2.5 +description: > + To upgrade from InfluxDB 2.0 beta 16 or earlier to InfluxDB 2.5 (stable), + manually upgrade all resources and data to the latest version by completing these steps. +menu: + influxdb_2_5: + parent: Upgrade InfluxDB + name: InfluxDB 2.x to 2.5 +weight: 10 +related: + - /influxdb/v2.5/reference/cli/influxd/downgrade/ + - /influxdb/v2.5/install/ +--- + +Upgrade to InfluxDB {{< current-version >}} from an earlier version of InfluxDB 2.x. + +{{% note %}} +#### InfluxDB 2.0 beta-16 or earlier +If you're upgrading from InfluxDB 2.0 beta-16 or earlier, you must first +[upgrade to InfluxDB 2.0](/influxdb/v2.0/upgrade/v2-beta-to-v2/), +and then complete the steps below. +{{% /note %}} + +{{< tabs-wrapper >}} +{{% tabs %}} +[macOS](#) +[Linux](#) +[Windows](#) +[Docker](#) +{{% /tabs %}} + + +{{% tab-content %}} +Do one of the following: + +- [Use homebrew to upgrade](#use-homebrew-to-upgrade) +- [Manually upgrade](#manually-upgrade) + +### Use homebrew to upgrade +``` +brew upgrade influxdb +``` + +### Manually upgrade +To manually upgrade, [download and install the latest version of InfluxDB {{< current-version >}} for macOS](/influxdb/v2.5/install/#manually-download-and-install) +in place of your current 2.x version. +{{% /tab-content %}} + + + +{{% tab-content %}} + +[Download and install the latest version of InfluxDB {{< current-version >}} for Linux](/influxdb/v2.5/install/?t=Linux#download-and-install-influxdb-v21) +in place of current 2.x version. + +{{% /tab-content %}} + + + +{{% tab-content %}} + +[Download and install the latest version of InfluxDB {{< current-version >}} for Windows](/influxdb/v2.5/install/?t=Windows) +in place of current 2.x version. 
+
+{{% /tab-content %}}
+
+
+
+{{% tab-content %}}
+
+To upgrade to InfluxDB {{< current-version >}} with Docker, update your Docker
+image to use the latest InfluxDB image.
+
+```sh
+influxdb:{{< latest-patch >}}
+```
+{{% /tab-content %}}
+
+
+{{< /tabs-wrapper >}}
diff --git a/content/influxdb/v2.5/users/_index.md b/content/influxdb/v2.5/users/_index.md
new file mode 100644
index 000000000..6afbf1717
--- /dev/null
+++ b/content/influxdb/v2.5/users/_index.md
@@ -0,0 +1,26 @@
+---
+title: Manage users
+seotitle: Manage users in InfluxDB
+description: Manage users in InfluxDB using the InfluxDB UI or the influx CLI.
+influxdb/v2.5/tags: [users, authentication]
+menu:
+  influxdb_2_5:
+    name: Manage users
+weight: 11
+products: [oss]
+---
+
+Users are those with access to InfluxDB.
+To grant a user permission to access data, add them as a [member of an organization](/influxdb/v2.5/organizations/members/)
+and provide them with an [API token](/influxdb/v2.5/security/tokens/).
+
+The following articles walk through managing users.
+
+{{% note %}}
+#### InfluxDB 2.x/1.x compatibility
+If you [upgraded from 1.x to {{< current-version >}}](/influxdb/v2.5/upgrade/v1-to-v2/),
+use the [`influx v1 auth`](/influxdb/v2.5/reference/cli/influx/v1/auth/) commands
+to manage authorizations for the InfluxDB [1.x compatibility API](/influxdb/v2.5/reference/api/influxdb-1x/).
+{{% /note %}}
+
+{{< children >}}
diff --git a/content/influxdb/v2.5/users/change-password.md b/content/influxdb/v2.5/users/change-password.md
new file mode 100644
index 000000000..a19d961df
--- /dev/null
+++ b/content/influxdb/v2.5/users/change-password.md
@@ -0,0 +1,36 @@
+---
+title: Change your password
+seotitle: Change your password in InfluxDB
+description: Change your password in InfluxDB using the influx CLI.
+menu:
+  influxdb_2_5:
+    name: Change your password
+    parent: Manage users
+weight: 105
+---
+
+Use the `influx` command line interface (CLI) to update your password.
+
+{{% note %}}
+User passwords cannot be updated in the InfluxDB UI.
+{{% /note %}}
+
+## Change your password using the influx CLI
+
+Use the [`influx user password` command](/influxdb/v2.5/reference/cli/influx/user/password)
+to update a password for a user. To update a password, you need the following:
+
+- Username or user ID _(provided in the output of `influx user list`)_
+- New password
+- [Operator token](/influxdb/v2.5/security/tokens/#operator-token)
+
+##### Update a password
+```sh
+# Syntax
+influx user password -n <username> -t <operator-token>
+
+# Example
+influx user password -n johndoe -t My5uPErSecR37t0k3n
+```
+
+When prompted, enter and confirm the new password.
diff --git a/content/influxdb/v2.5/users/create-user.md b/content/influxdb/v2.5/users/create-user.md
new file mode 100644
index 000000000..25a94fa96
--- /dev/null
+++ b/content/influxdb/v2.5/users/create-user.md
@@ -0,0 +1,51 @@
+---
+title: Create a user
+seotitle: Create a user in InfluxDB
+description: Create a user in InfluxDB using the InfluxDB UI or the influx CLI.
+menu:
+  influxdb_2_5:
+    name: Create a user
+    parent: Manage users
+weight: 101
+products: [oss]
+---
+
+Use the `influx` command line interface (CLI) to create a user.
+
+{{% note %}}
+Additional users cannot be created in the InfluxDB UI.
+{{% /note %}}
+
+## Create a user using the influx CLI
+
+To create a new user, use the [`influx user create` command](/influxdb/v2.5/reference/cli/influx/user/create)
+and include the following:
+
+- Username
+- Organization name or organization ID to add the user to _(provided in the output of
+  [`influx org list`](/influxdb/v2.5/reference/cli/influx/org/list/))_
+
+```sh
+# Syntax
+influx user create -n <username> -o <org-name>
+
+# Example
+influx user create -n johndoe -o example-org
+```
+
+### Create a user with a password and organization
+To create a new user with a password and add the user as a member of an organization,
+include a password and organization ID with the `influx user create` command.
+
+- Username
+- Organization name or organization ID to add the user to _(provided in the output of
+  [`influx org list`](/influxdb/v2.5/reference/cli/influx/org/list/))_
+- Password
+
+```sh
+# Syntax
+influx user create -n <username> -p <password> -o <org-name>
+
+# Example
+influx user create -n johndoe -p PaSsWoRd -o example-org
+```
diff --git a/content/influxdb/v2.5/users/delete-user.md b/content/influxdb/v2.5/users/delete-user.md
new file mode 100644
index 000000000..16bb48472
--- /dev/null
+++ b/content/influxdb/v2.5/users/delete-user.md
@@ -0,0 +1,41 @@
+---
+title: Delete a user
+seotitle: Delete a user from InfluxDB
+description: Delete a user from InfluxDB using the InfluxDB UI or the influx CLI.
+menu:
+  influxdb_2_5:
+    name: Delete a user
+    parent: Manage users
+weight: 103
+products: [oss]
+---
+
+Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI)
+to delete a user.
+
+{{% warn %}}
+Deleting a user removes them completely from InfluxDB.
+To remove a user from an organization without deleting the user entirely, see
+[Remove a member from an organization](/influxdb/v2.5/organizations/members/remove-member/).
+{{% /warn %}}
+
+## Delete a user from the InfluxDB UI
+
+{{% note %}}
+Users cannot be deleted from the InfluxDB UI.
+{{% /note %}}
+
+## Delete a user using the influx CLI
+
+Use the [`influx user delete` command](/influxdb/v2.5/reference/cli/influx/user/delete)
+to delete a user. Deleting a user requires the following:
+
+- The user ID _(provided in the output of `influx user list`)_
+
+```sh
+# Syntax
+influx user delete -i <user-id>
+
+# Example
+influx user delete -i 034ad714fdd6f000
+```
diff --git a/content/influxdb/v2.5/users/recover-credentials.md b/content/influxdb/v2.5/users/recover-credentials.md
new file mode 100644
index 000000000..3208587f1
--- /dev/null
+++ b/content/influxdb/v2.5/users/recover-credentials.md
@@ -0,0 +1,54 @@
+---
+title: Recover user credentials
+seotitle: Recover InfluxDB user credentials
+description: Recover InfluxDB user credentials using the influxd CLI.
+menu:
+  influxdb_2_5:
+    name: Recover credentials
+    parent: Manage users
+weight: 106
+products: [oss]
+related:
+  - /influxdb/v2.5/reference/cli/influxd/recovery/
+---
+
+Use the `influxd` command line interface (CLI) to recover user credentials and regain access to your InfluxDB instance:
+- [Update a password](#update-a-password)
+- [List existing users in the InfluxDB instance](#list-existing-users-in-the-influxdb-instance)
+- [Create a user for recovery purposes](#create-a-user-for-recovery-purposes)
+
+## Update a password
+
+To update a password, run the following:
+
+```sh
+influxd recovery user update \
+  --username example-username \
+  --password ExAmPL3-paS5W0rD
+```
+
+{{% note %}}
+**Note:** If you're not sure of the username, [list existing users in the InfluxDB instance](#list-existing-users-in-the-influxdb-instance) or [create a user for recovery purposes](#create-a-user-for-recovery-purposes).
+{{% /note %}}
+
+## List existing users in the InfluxDB instance
+
+To list existing users in the system, run the following:
+
+```sh
+influxd recovery user list
+```
+
+{{% note %}}
+If you used a [custom `bolt-path`](/influxdb/v2.5/reference/config-options/#bolt-path) when starting InfluxDB, provide your custom bolt path to the `influxd recovery user list` command with the `--bolt-path` flag.
+{{% /note %}}
+
+## Create a user for recovery purposes
+
+To create a new user for recovery purposes, run the following:
+
+```sh
+influxd recovery user create \
+  --username example-username \
+  --password ExAmPL3-paS5W0rD
+```
diff --git a/content/influxdb/v2.5/users/update-user.md b/content/influxdb/v2.5/users/update-user.md
new file mode 100644
index 000000000..a061657ff
--- /dev/null
+++ b/content/influxdb/v2.5/users/update-user.md
@@ -0,0 +1,36 @@
+---
+title: Update a user
+seotitle: Update a user in InfluxDB
+description: Update a user in InfluxDB using the InfluxDB UI or the influx CLI.
+menu:
+  influxdb_2_5:
+    name: Update a user
+    parent: Manage users
+weight: 103
+products: [oss]
+---
+
+Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI)
+to update a user.
+
+## Update a user in the InfluxDB UI
+
+{{% note %}}
+User information cannot be updated in the InfluxDB UI.
+{{% /note %}}
+
+## Update a user using the influx CLI
+
+Use the [`influx user update` command](/influxdb/v2.5/reference/cli/influx/user/update)
+to update a user. Updating a user requires the following:
+
+- The user ID _(provided in the output of `influx user list`)_
+
+##### Update the name of a user
+```sh
+# Syntax
+influx user update -i <user-id> -n <new-username>
+
+# Example
+influx user update -i 034ad714fdd6f000 -n janedoe
+```
diff --git a/content/influxdb/v2.5/users/view-users.md b/content/influxdb/v2.5/users/view-users.md
new file mode 100644
index 000000000..2932e68e0
--- /dev/null
+++ b/content/influxdb/v2.5/users/view-users.md
@@ -0,0 +1,33 @@
+---
+title: View users
+seotitle: View users in InfluxDB
+description: Review a list of users in InfluxDB using the InfluxDB UI or the influx CLI.
+menu:
+  influxdb_2_5:
+    name: View users
+    parent: Manage users
+weight: 102
+products: [oss]
+---
+
+Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI)
+to view users.
+
+## View users in the InfluxDB UI
+
+{{% note %}}
+There is no list of users in the InfluxDB UI.
+{{% /note %}}
+
+## View users using the influx CLI
+
+Use the [`influx user list` command](/influxdb/v2.5/reference/cli/influx/user/list)
+to view users.
+ +```sh +influx user list +``` + +Filtering options such as filtering by username or ID are available. +See the [`influx user list` documentation](/influxdb/v2.5/reference/cli/influx/user/list) +for information about other available flags. diff --git a/content/influxdb/v2.5/visualize-data/_index.md b/content/influxdb/v2.5/visualize-data/_index.md new file mode 100644 index 000000000..eca59e3e5 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/_index.md @@ -0,0 +1,22 @@ +--- +title: Visualize data with the InfluxDB UI +description: > + InfluxDB offers a complete dashboard solution for visualizing your time series data. + Create custom dashboards with flexible queries and visualization types. +influxdb/v2.5/tags: [visualize] +menu: + influxdb_2_5: + name: Visualize data +weight: 5 +--- + +The InfluxDB user interface (UI) provides tools for building custom dashboards to visualize your data. +The following articles outline ways to customize and manage dashboards. + +{{% note %}} +The InfluxDB UI is packaged with InfluxDB and runs as part of the InfluxDB server. +To access the UI, start the [`influxd` service](/influxdb/v2.5/reference/cli/influxd/) +and visit in your web browser. +{{% /note %}} + +{{< children >}} diff --git a/content/influxdb/v2.5/visualize-data/annotations.md b/content/influxdb/v2.5/visualize-data/annotations.md new file mode 100644 index 000000000..f190f7f53 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/annotations.md @@ -0,0 +1,68 @@ +--- +title: Use annotations in dashboards +description: > + Add annotations to your InfluxDB dashboards to provide useful, contextual information about single points in time. +influxdb/v2.5/tags: [labels, annotations] +menu: + influxdb_2_5: + name: Use annotations + parent: Visualize data +weight: 105 +--- + +Add annotations to your dashboards to provide useful, contextual information about single points in time. After an annotation is created, edit the annotation by updating the text or timestamp, or delete the annotation. + +- [Create an annotation](#create-an-annotation) +- [Edit an annotation](#edit-an-annotation) +- [View or hide annotations](#view-or-hide-annotations) +- [Delete an annotation](#delete-an-annotation) + +{{% note %}} +Annotations may be useful to highlight operations or anomalies for your team to reference. +{{% /note %}} + + + +## Create an annotation + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Select an existing dashboard to add the annotation to, or [create a new dashboard](/influxdb/cloud/visualize-data/dashboards/create-dashboard/), and then click the **Annotations** button. +3. In a dashboard cell, do one of the following: + - Press Shift and click the graph at the time you want to annotate. + - To add an annotation to a time range, press Shift, click the start time, and then drag your cursor to the end time. +4. On the **Add Annotation** page: + + 1. Verify the time or time range, and update if needed. + 2. Enter a message (maximum of 255 characters) to associate with the selected time or time range. + 3. Click **Save Annotation**. The annotation appears in the cell (dotted lines indicate the selected time or time range). + +## Edit an annotation + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Open the dashboard with the annotation to edit, and then click the annotation to open it. +3. 
Update the text (maximum of 255 characters) or timestamp, and then click **Save Annotation**. + +## View or hide annotations + +By default, annotations are visible. + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Open a dashboard with annotations: + - To hide annotations, click the **Annotations** button. The button is gray when annotations are hidden. + - To show annotations, click the **Annotations** button. The button is purple when annotations are visible. + +## Delete an annotation + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} +2. Open a dashboard with the annotation to delete, click the dotted annotation line, and then click **Delete Annotation**. diff --git a/content/influxdb/v2.5/visualize-data/dashboards/_index.md b/content/influxdb/v2.5/visualize-data/dashboards/_index.md new file mode 100644 index 000000000..67d232538 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/dashboards/_index.md @@ -0,0 +1,39 @@ +--- +title: Manage InfluxDB dashboards +description: Create, edit, and manage custom dashboards in the InfluxDB user interface (UI). +influxdb/v2.5/tags: [dashboards] +menu: + influxdb_2_5: + name: Manage dashboards + parent: Visualize data +weight: 102 +--- + +Create, edit, and manage dashboards from the **Dashboards** tab in the left navigation. + +{{< children >}} + + +## View your dashboard ID + +Use the InfluxDB UI or `influx` CLI to view your dashboard ID. + +### Dashboard ID in the UI + +When viewing a dashboard in the InfluxDB UI, your dashboard ID appears in the URL. + +{{< code-callout "04b6b15034cc000" >}} +```sh +http://localhost:8086/orgs/03a2bbf46249a000/dashboards/04b6b15034cc000/... +``` +{{< /code-callout >}} + +### Dashboard ID in the CLI +Use [`influx dashboards`](/influxdb/v2.5/reference/cli/influx/dashboards/) to view a list of dashboards and IDs. + +```sh +> influx dashboards +ID Name +03a2bbf46249a000 dashboard-1 +03ace3a859669000 dashboard-2 +``` diff --git a/content/influxdb/v2.5/visualize-data/dashboards/control-dashboard.md b/content/influxdb/v2.5/visualize-data/dashboards/control-dashboard.md new file mode 100644 index 000000000..0a84c96f9 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/dashboards/control-dashboard.md @@ -0,0 +1,67 @@ +--- +title: Control a dashboard +seotitle: Control an InfluxDB dashboard +description: Control an InfluxDB dashboard in the InfluxDB user interface (UI). +influxdb/v2.5/tags: [dashboards] +menu: + influxdb_2_5: + name: Control a dashboard + parent: Manage dashboards +weight: 203 +--- + +## Control at the dashboard level + +Use dashboard controls in the upper left to update your dashboard. + +### Add a cell + +Click **{{< icon "add-cell" >}} Add Cell** to open the Data Explorer and configure a new cell for your dashboard. + +For details on using the Data Explorer, see [Explore metrics](/influxdb/v2.5/visualize-data/explore-metrics/). + +### Add a note + +1. Click **{{< icon "note" >}} Add Note** to add a note cell to your dashboard. +2. Enter your note in Markdown in the left pane. A preview appears in the right pane. +3. Enable the **Show note when query returns no data** option to show the note only when the query displays no data. +4. Click **Save**. + +### Select timezone + +Click the timezone dropdown to select a timezone to use for the dashboard. Select either the local time (default) or UTC. 
+ +{{< img-hd src="/img/influxdb/2-0-controls-timezone.png" alt="Select timezone" />}} + +### Manually refresh dashboard + +Click the refresh button (**{{< icon "refresh" >}}**) to manually refresh the dashboard's data. + +#### Manually refresh a single dashboard cell + +1. Click the **{{< icon "gear" >}}** on the dashboard cell you want to refresh. +2. Click **{{< icon "refresh" >}} Refresh**. + +### Select time range + +1. Select from the time range options in the dropdown menu. + + {{< img-hd src="/img/influxdb/2-0-controls-time-range.png" alt="Select time range" />}} + +2. Select **Custom Time Range** to enter a custom time range with precision up to nanoseconds. +The default time range is 5 minutes. + + > The custom time range uses the selected timezone (local time or UTC). + +### Add variables + +Click **Variables** to display variables available for your dashboard. For details, see [Use and manage variables](/influxdb/v2.5/visualize-data/variables/) + +### Presentation mode + +Click the fullscreen icon (**{{< icon "fullscreen" >}}**) to enter presentation mode. Presentation mode allows you to view [a dashboard] in full screen, hiding the left and top navigation menus so only the cells appear. This mode might be helpful, for example, for stationary screens dedicated to monitoring visualizations. + +### Toggle dark mode and light mode +Click the moon or sun icons to toggle your dashboard between **dark mode** and **light mode.** + +{{< img-hd src="/img/influxdb/2-0-controls-dark-light-mode.png" alt="Dark & light mode"/>}} diff --git a/content/influxdb/v2.5/visualize-data/dashboards/create-dashboard.md b/content/influxdb/v2.5/visualize-data/dashboards/create-dashboard.md new file mode 100644 index 000000000..924103d24 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/dashboards/create-dashboard.md @@ -0,0 +1,58 @@ +--- +title: Create a dashboard +seotitle: Create an InfluxDB dashboard +description: > + Create a new dashboard or import an existing dashboard in the + InfluxDB user interface (UI). +influxdb/v2.5/tags: [dashboards] +menu: + influxdb_2_5: + name: Create a dashboard + parent: Manage dashboards +weight: 201 +--- + +## Create a new dashboard + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Click the **{{< icon "plus" >}} Create Dashboard** menu in the upper right and select **New Dashboard**. +3. Enter a name for your dashboard in the **Name this dashboard** field in the upper left. + + +**To import an existing dashboard**: + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Click the **Create Dashboard** menu in the upper right and select **Import Dashboard**. +3. In the window that appears: + - Select **Upload File** to drag-and-drop or select a file. + - Select **Paste JSON** to paste in JSON. +4. Click **Import JSON as Dashboard**. + +## Clone a dashboard + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Hover over the dashboard and click **{{< icon "copy" >}}**. +3. Click **Clone**. The cloned dashboard opens. + +#### Add data to your dashboard + +1. From your dashboard, click **{{< icon "add-cell" >}} Add Cell**. +2. Create a query in the Data Explorer following the instructions in [Explore metrics](/influxdb/v2.5/visualize-data/explore-metrics). +3. Enter a name for your cell in the upper left. +4. 
Click the checkmark icon (**{{< icon "checkmark" >}}**) to save the cell to your dashboard. + You can also send data to your dashboard directly from the Data Explorer. For details, [Explore metrics](/influxdb/v2.5/visualize-data/explore-metrics). + +#### Add a note to your dashboard +1. From your dashboard, click **{{< icon "note" >}} Add Note** in the upper left. +2. Enter your note in the window that appears. You can use Markdown syntax to format your note. +3. To preview your Markdown formatting, click the **Preview** option. +4. Click **Save**. diff --git a/content/influxdb/v2.5/visualize-data/dashboards/delete-dashboard.md b/content/influxdb/v2.5/visualize-data/dashboards/delete-dashboard.md new file mode 100644 index 000000000..28bfd0ff8 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/dashboards/delete-dashboard.md @@ -0,0 +1,23 @@ +--- +title: Delete a dashboard +seotitle: Delete an InfluxDB dashboard +description: Delete a dashboard from the InfluxDB user interface (UI). +influxdb/v2.5/tags: [dashboards] +menu: + influxdb_2_5: + parent: Manage dashboards +weight: 204 +--- + +To delete a dashboard from the InfluxDB user interface (UI): + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Hover over the dashboard in the list of dashboards and click **{{< icon "trash" >}}**. +3. Click **Delete**. + +{{% warn %}} +Deleting a dashboard cannot be undone. +{{% /warn %}} diff --git a/content/influxdb/v2.5/visualize-data/dashboards/export-dashboard.md b/content/influxdb/v2.5/visualize-data/dashboards/export-dashboard.md new file mode 100644 index 000000000..17d39b6e2 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/dashboards/export-dashboard.md @@ -0,0 +1,26 @@ +--- +title: Export a dashboard +seotitle: Export an InfluxDB dashboard +description: > + Export a dashboard using the InfluxDB user interface (UI). +influxdb/v2.5/tags: [dashboards] +menu: + influxdb_2_5: + name: Export a dashboard + parent: Manage dashboards +weight: 203 +--- + +InfluxDB lets you export dashboards from the InfluxDB user interface (UI). + +1. In the navigation menu on the left, select **Boards** (**Dashboards**). + + {{< nav-icon "dashboards" >}} + +2. Hover over a dashboard and click the gear icon (**{{< icon "gear" >}}**), + and then select **Export**. +3. Review the JSON in the window that appears. +4. Select one of the following options: + * **Download JSON**: Download the dashboard as a JSON file. + * **Save as template**: Save the JSON as a dashboard template. + * **Copy to Clipboard**: Copy the JSON to your clipboard. diff --git a/content/influxdb/v2.5/visualize-data/labels.md b/content/influxdb/v2.5/visualize-data/labels.md new file mode 100644 index 000000000..5d62cd258 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/labels.md @@ -0,0 +1,44 @@ +--- +title: Manage labels in the InfluxDB UI +description: > + Labels are a way to add visual metadata to dashboards, tasks, and other items + in the InfluxDB UI. View and manage labels in the InfluxDB user interface. +influxdb/v2.5/tags: [labels] +menu: + influxdb_2_5: + name: Manage labels + parent: Visualize data +weight: 104 +--- + +Labels are a way to add visual metadata to dashboards, tasks, and other items in the InfluxDB UI. +To manage labels: + +- In the navigation menu on the left, select **Settings** > **Labels**. + + {{< nav-icon "settings" >}} + + +#### Create a label +1. Click **{{< icon "plus" >}} Create Label**. +2. Enter a **Name** for the label. +3. 
Enter a description for the label _(Optional)_. +4. Select a **Color** for the label. +5. Click **Create Label**. + +#### Edit a label +1. In the label list view, click the name of the label you would like to edit. + The **Edit Label** overlay will appear. +2. Make the desired changes to the label. +3. Click **Save Changes**. + +#### Delete a label +1. In the label list view, hover over the label you would like to delete and click **{{< icon "trash" >}}**. +2. Click **Delete**. + +### Add labels to dashboard items +1. In the list view of dashboards, tasks, or other assets, hover over the item to which you would like to add a label. +2. Click the {{< icon "add-label" >}} icon that appears below the name. + The **Add Labels** overlay will appear. +3. Type the name of the label you would like to add to filter the list of available labels. + Click the label you would like to add. diff --git a/content/influxdb/v2.5/visualize-data/variables/_index.md b/content/influxdb/v2.5/visualize-data/variables/_index.md new file mode 100644 index 000000000..451918f88 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/_index.md @@ -0,0 +1,73 @@ +--- +title: Use and manage variables +seotitle: Use and manage dashboard variables +description: > + Dashboard variables allow you to alter specific components of cells' queries + without having to edit the queries, making it easy to interact with your dashboard cells and explore your data. +menu: + influxdb_2_5: + parent: Visualize data +weight: 103 +influxdb/v2.5/tags: [variables] +--- + +Dashboard variables let you alter specific components of cells' queries without having to edit the queries, +making it easy to interact with your dashboard cells and explore your data. + +Variables are scoped by organization. + +## Use dashboard variables +Both [predefined dashboard variables](#predefined-dashboard-variables) and [custom dashboard variables](#custom-dashboard-variables) +are stored in a `v` record associated with each dashboard. +Reference each variable using dot-notation (e.g. `v.variableName`). + +```js +from(bucket: v.bucket) + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == v.measurement and r._field == v.field) + |> aggregateWindow(every: v.windowPeriod, fn: mean) +``` + +When building Flux queries for dashboard cells, view available dashboard variables +in the **Variables** tab next to the Functions tab. + +{{< img-hd src="/img/influxdb/2-0-variables-data-explorer-view.png" />}} + +Click a variable name to add it to your query and select a value from the **Value** dropdown. + +## Link to dashboards with variables defined in the URL + +When you apply a variable to your dashboard, `&vars[variable_name]=value` is appended to the URL so you can share the link with the variables included. + +## Predefined dashboard variables +The InfluxDB user interface (UI) provides the following predefined dashboard variables: + +#### v.timeRangeStart +Specifies the beginning of the queried time range. +This variable is typically used to define the [`start` parameter](/{{< latest "flux" >}}/stdlib/universe/range#start) +of the `range()` function. + +The **Time Range** selector defines the value of this variable. + +#### v.timeRangeStop +Specifies the end of the queried time range. +This variable is typically used to define the [`stop` parameter](/{{< latest "flux" >}}/stdlib/universe/range#stop) +of the `range()` function. + +The **Time Range** selector defines the value of this variable. +It defaults to `now`. 
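+
+For example, a minimal sketch of how both time range variables are typically passed to
+`range()` (the bucket name `example-bucket` is a placeholder):
+
+```js
+from(bucket: "example-bucket")
+    // Query only the time range selected in the Time Range dropdown
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+```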
+ +#### v.windowPeriod +Specifies the period of windowed data. +This variable is typically used to define the `every` or `period` parameters of the +[`window()` function](/{{< latest "flux" >}}/stdlib/universe/window) +in data aggregation operations. + +The value of this variable is calculated by analyzing the duration of the Flux +query it is used within. Queries that fetch data from a longer time range will +have a larger `v.windowPeriod` duration. + +## Custom dashboard variables +Create, manage, and use custom dashboard variables in the InfluxDB user interface (UI). + +{{< children >}} diff --git a/content/influxdb/v2.5/visualize-data/variables/common-variables.md b/content/influxdb/v2.5/visualize-data/variables/common-variables.md new file mode 100644 index 000000000..a63adb469 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/common-variables.md @@ -0,0 +1,112 @@ +--- +title: Common variable queries +description: Useful queries to use to populate values in common dashboard variable use cases. +menu: + influxdb_2_5: + parent: Use and manage variables + name: Common variable queries +weight: 208 +influxdb/v2.5/tags: [variables] +--- + +## List buckets +List all buckets in the current organization. + +_**Flux functions:** +[buckets()](/{{< latest "flux" >}}/stdlib/universe/buckets/), +[rename()](/{{< latest "flux" >}}/stdlib/universe/rename/), +[keep()](/{{< latest "flux" >}}/stdlib/universe/keep/)_ + +```js +buckets() + |> rename(columns: {"name": "_value"}) + |> keep(columns: ["_value"]) +``` + +## List measurements +List all measurements in a specified bucket. + +_**Flux package:** [InfluxDB schema](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/) +**Flux functions:** [schema.measurements()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurements/)_ + +```js +import "influxdata/influxdb/schema" + +schema.measurements(bucket: "bucket-name") +``` + +## List fields in a measurement +List all fields in a specified bucket and measurement. + +_**Flux package:** [InfluxDB schema](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/) +**Flux functions:** [schema.measurementTagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagvalues/)_ + +```js +import "influxdata/influxdb/schema" + +schema.measurementTagValues( + bucket: "bucket-name", + measurement: "measurement-name", + tag: "_field", +) +``` + +## List unique tag values +List all unique tag values for a specific tag in a specified bucket. +The example below lists all unique values of the `host` tag. + +_**Flux package:** [InfluxDB schema](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/)_ +_**Flux functions:** [schema.tagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/tagvalues/)_ + +```js +import "influxdata/influxdb/schema" + +schema.tagValues(bucket: "bucket-name", tag: "host") +``` + +## List Docker containers +List all Docker containers when using the Docker Telegraf plugin. + +_**Telegraf plugin:** [Docker](/{{< latest "telegraf" >}}/plugins/#input-docker)_ +_**Flux package:** [InfluxDB schema](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/)_ +_**Flux functions:** [schema.tagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/tagvalues/)_ + +```js +import "influxdata/influxdb/schema" + +schema.tagValues(bucket: "bucket-name", tag: "container_name") +``` + +## List Kubernetes pods +List all Kubernetes pods when using the Kubernetes Telegraf plugin. 
+ +_**Telegraf plugin:** [Kubernetes](/{{< latest "telegraf" >}}/plugins/#input-kubernetes)_ +_**Flux package:** [InfluxDB schema](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/)_ +_**Flux functions:** [schema.measurementTagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagvalues/)_ + +```js +import "influxdata/influxdb/schema" + +schema.measurementTagValues( + bucket: "bucket-name", + measurement: "kubernetes_pod_container", + tag: "pod_name", +) +``` + +## List Kubernetes nodes +List all Kubernetes nodes when using the Kubernetes Telegraf plugin. + +_**Telegraf plugin:** [Kubernetes](/{{< latest "telegraf" >}}/plugins/#input-kubernetes)_ +_**Flux package:** [InfluxDB schema](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/)_ +_**Flux functions:** [schema.measurementTagValues()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/schema/measurementtagvalues/)_ + +```js +import "influxdata/influxdb/schema" + +schema.measurementTagValues( + bucket: "bucket-name", + measurement: "kubernetes_node", + tag: "node_name", +) +``` diff --git a/content/influxdb/v2.5/visualize-data/variables/create-variable.md b/content/influxdb/v2.5/visualize-data/variables/create-variable.md new file mode 100644 index 000000000..660d03782 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/create-variable.md @@ -0,0 +1,73 @@ +--- +title: Create a variable +seotitle: Create a dashboard variable +description: Create dashboard variables in the Data Explorer, from the Organization page, or import a variable. +menu: + influxdb_2_5: + parent: Use and manage variables +weight: 201 +influxdb/v2.5/tags: [variables] +--- + +Create dashboard variables in the Data Explorer, from the Settings section, or import a variable. +**Variable names must be unique.** + +There are multiple variable types that provide different means of populating your list of variable values. +_For information about variable types, see [Variable types](/influxdb/v2.5/visualize-data/variables/variable-types/)._ + +{{% note %}} +##### Variable name restrictions +Variable names must begin with a letter or underscore (`_`). + +The following names cannot be used as dashboard variables because they are reserved keywords in Flux: +`and`, `import`, `not`, `return`, `option`, `test`, `empty`, `in`, `or`, `package`, and `builtin`. +{{% /note %}} + +## Create a variable in the Data Explorer + +{{% note %}} +InfluxData recommends using the Data Explorer to create +[Query dashboard variables](/influxdb/v2.5/visualize-data/variables/variable-types/#query). +The [Table visualization type](/influxdb/v2.5/visualize-data/visualization-types/table/) and +**View Raw Data** option to provide human-readable query results. +{{% /note %}} + +1. Click the **Data Explorer** icon in the sidebar. + + {{< nav-icon "data-explorer" >}} + +2. Use the **Query Builder** or **Script Editor** to build a query. +3. Use the [Table visualization type](/influxdb/v2.5/visualize-data/visualization-types/table/) + or enable the **View Raw Data** option to view human-readable query results. +4. Click **Save As** in the upper right. +5. In the window that appears, select **Variable**. +6. Enter a name for your variable in the **Name** field. +7. Click **Create**. + +_For information about common Query variables, see [Common variable queries](/influxdb/v2.5/visualize-data/variables/common-variables/)._ + +## Create a variable in the Settings section + +1. Click the **Settings** icon in the navigation bar. 
+ + {{< nav-icon "settings" >}} + +2. Select the **Variables** tab. +3. Enter a name for your variable. +4. Select your variable type. For details on each type, see [Variable types](/influxdb/v2.5/visualize-data/variables/variable-types/). +5. Enter the appropriate variable information. +6. Click **Create**. + +## Import a variable +InfluxDB lets you import variables exported from InfluxDB in JSON format. + +1. Click the **Settings** icon in the navigation bar. + + {{< nav-icon "settings" >}} + +2. Select the **Variables** tab. +3. Click the **{{< icon "plus" >}} Create Variable** drop-down menu and select **Import Variable**. +4. In the window that appears: + - Select **Upload File** to drag and drop or select a file. + - Select **Paste JSON** to paste in JSON. +6. Click **Import JSON as Variable**. diff --git a/content/influxdb/v2.5/visualize-data/variables/delete-variable.md b/content/influxdb/v2.5/visualize-data/variables/delete-variable.md new file mode 100644 index 000000000..609b821a3 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/delete-variable.md @@ -0,0 +1,26 @@ +--- +title: Delete a variable +seotitle: Delete a dashboard variable +description: Delete a dashboard variable in the InfluxDB user interface. +menu: + influxdb_2_5: + parent: Use and manage variables +weight: 205 +influxdb/v2.5/tags: [variables] +--- + +Delete an existing variable in the InfluxDB user interface (UI). + +### Delete a variable + +1. Click the **Settings** icon in the navigation bar. + + {{< nav-icon "settings" >}} + +2. Select the **Variables** tab. +3. Hover over a variable, click the **{{< icon "trash" >}}** icon, and **Delete**. + +{{% warn %}} +Once deleted, any dashboards with queries that utilize the variable will no +longer function correctly. +{{% /warn %}} diff --git a/content/influxdb/v2.5/visualize-data/variables/export-variable.md b/content/influxdb/v2.5/visualize-data/variables/export-variable.md new file mode 100644 index 000000000..309c184ed --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/export-variable.md @@ -0,0 +1,27 @@ +--- +title: Export a variable +seotitle: Export a dashboard variable +description: Export a dashboard variable in the InfluxDB user interface. +menu: + influxdb_2_5: + parent: Use and manage variables +weight: 204 +influxdb/v2.5/tags: [variables] +--- +Export dashboard variables from the InfluxDB user interface (UI). +Variables are exported as downloadable JSON files. + +### Export a variable + +1. Click the **Settings** icon in the navigation bar. + + {{< nav-icon "settings" >}} + +2. Select the **Variables** tab. +3. Hover over a variable in the list, then click the gear icon (**{{< icon "gear" >}}**) + and select **Export**. +4. Review the JSON in the window that appears. +5. Select one of the following options: + * **Download JSON**: Download the dashboard as a JSON file. + * **Save as template**: Save the JSON as a dashboard template. + * **Copy to Clipboard**: Copy the JSON to your clipboard. diff --git a/content/influxdb/v2.5/visualize-data/variables/update-variable.md b/content/influxdb/v2.5/visualize-data/variables/update-variable.md new file mode 100644 index 000000000..0bc6ec254 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/update-variable.md @@ -0,0 +1,23 @@ +--- +title: Update a variable +seotitle: Update a dashboard variable +description: Update a dashboard variable in the InfluxDB user interface. 
+menu: + influxdb_2_5: + parent: Use and manage variables +weight: 203 +influxdb/v2.5/tags: [variables] +--- + +Update an existing dashboard variable's name or JSON content in the InfluxDB user interface (UI). + +### Update a variable + +1. Click the **Settings** icon in the navigation bar. + + {{< nav-icon "settings" >}} + +2. Select the **Variables** tab. +3. Click a variable's name from the list. +4. Update the variable's name, type, and associated information. +5. Click **Submit**. diff --git a/content/influxdb/v2.5/visualize-data/variables/variable-types.md b/content/influxdb/v2.5/visualize-data/variables/variable-types.md new file mode 100644 index 000000000..e907d19da --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/variable-types.md @@ -0,0 +1,97 @@ +--- +title: Variable types +seotitle: Dashboard variable types +description: Overview of the types of dashboard variables available in InfluxDB +menu: + influxdb_2_5: + parent: Use and manage variables +weight: 207 +influxdb/v2.5/tags: [variables] +--- + +Variable types determine how a variable's list of possible values is populated. +The following variable types are available: + +- [Map](#map) +- [Query](#query) +- [CSV](#csv) + +### Map +Map variables use a list of key value pairs in CSV format to map keys to specific values. +Keys populate the variable's value list in the InfluxDB user interface (UI), but +values are used when actually processing the query. + +The most common use case for map variables is aliasing simple, human-readable keys +to complex values. + +##### Map variable example +```js +Juanito MacNeil,"5TKl6l8i4idg15Fxxe4P" +Astrophel Chaudhary,"bDhZbuVj5RV94NcFXZPm" +Ochieng Benes,"YIhg6SoMKRUH8FMlHs3V" +Mila Emile,"o61AhpOGr5aO3cYVArC0" +``` + +### Query +Query variable values are populated using the `_value` column of a Flux query. + +##### Query variable example +```js +// List all buckets +buckets() + |> rename(columns: {"name": "_value"}) + |> keep(columns: ["_value"]) +``` + +_For examples of dashboard variable queries, see [Common variable queries](/influxdb/v2.5/visualize-data/variables/common-variables)._ + +{{% note %}} +#### Important things to note about variable queries +- The variable will only use values from the `_value` column. + If the data you’re looking for is in a column other than `_value`, use the + [`rename()`](/{{< latest "flux" >}}/stdlib/universe/rename/) or + [`map()`](/{{< latest "flux" >}}/stdlib/universe/map/) functions + to change the name of that column to `_value`. +- The variable will only use the first table in the output stream. + Use the [`group()` function](/{{< latest "flux" >}}/stdlib/universe/group) + to group everything into a single table. +- Do not use any [predefined dashboard variables](/influxdb/v2.5/visualize-data/variables/#predefined-dashboard-variables) in variable queries. +{{% /note %}} + +### CSV +CSV variables use a CSV-formatted list to populate variable values. +A common use case is when the list of potential values is static and cannot be +queried from InfluxDB. + +##### CSV variable examples +``` +value1, value2, value3, value4 +``` +``` +value1 +value2 +value3 +value4 +``` + +## Use custom dashboard variables + +Use the Flux `v` record and [dot or bracket notation](/{{< latest "flux" >}}/data-types/composite/record/#reference-values-in-a-record) to access custom dashboard variables. 
+ +For example, to use a custom dashboard variable named `exampleVar` in a query, +reference the variable with `v.exampleVar`: + +```js +from(bucket: "telegraf") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "cpu" ) + |> filter(fn: (r) => r._field == "usage_user" ) + |> filter(fn: (r) => r.cpu == v.exampleVar) +``` + +**To select variable values:** + +- **In a dashboard:** Use the dashboard variable drop-down menus at the top of your dashboard. +- **In the Script Editor:** Click the **Variables** tab on the right of the Script Editor, click the name of the variable, and then select the variable value from the drop-down menu. + +_For more on using dashboard variables, see [Use and manage variables](/influxdb/v2.5/visualize-data/variables/)._ diff --git a/content/influxdb/v2.5/visualize-data/variables/view-variables.md b/content/influxdb/v2.5/visualize-data/variables/view-variables.md new file mode 100644 index 000000000..1d74d18fd --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/variables/view-variables.md @@ -0,0 +1,31 @@ +--- +title: View variables +seotitle: View dashboard variables +description: View dashboard variables in the InfluxDB user interface. +menu: + influxdb_2_5: + parent: Use and manage variables +weight: 203 +influxdb/v2.5/tags: [variables] +--- + +View a list of dashboard variables in the InfluxDB user interface (UI) from an organization or from the Data Explorer. + +## View variables in the organization + +1. Click the **Settings** icon in the navigation bar. + + {{< nav-icon "settings" >}} + +3. Select the **Variables** tab. + +## View variables in the Data Explorer + +1. Click the **Data Explorer** icon in the navigation bar. + + {{< nav-icon "data-explorer" >}} + +2. Switch to **Script Editor**. +3. Click the **Variables** tab to the right of the script editor. + + {{< img-hd src="/img/influxdb/2-0-variables-data-explorer-view.png" />}} diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/_index.md b/content/influxdb/v2.5/visualize-data/visualization-types/_index.md new file mode 100644 index 000000000..0bc8eb1eb --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/_index.md @@ -0,0 +1,17 @@ +--- +title: Visualization types +description: > + The InfluxDB UI provides multiple visualization types to visualize your data in + a format that makes the most sense for your use case. Use the available customization + options to customize each visualization. +menu: + influxdb_2_5: + parent: Visualize data +weight: 105 +--- + +The InfluxDB UI provides multiple visualization types to visualize your data in +a format that makes the most sense for your use case. Use the available customization +options to customize each visualization. + +{{< children >}} diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/band.md b/content/influxdb/v2.5/visualize-data/visualization-types/band.md new file mode 100644 index 000000000..bad27e094 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/band.md @@ -0,0 +1,174 @@ +--- +title: Band visualization +list_title: Band +list_image: /img/influxdb/2-0-visualizations-Band-example.png +description: +weight: 201 +menu: + influxdb_2_5: + name: Band + parent: Visualization types +--- + +The **Band** visualization displays the upper and lower boundaries for groups of data over time. 
Boundaries are determined by applying aggregate functions to your data for a specified window period, and then setting the aggregate functions for a specified upper, main, or lower boundary. + +## Set up the Band visualization + +To see bands (boundaries) in the **Band Plot** visualization, you must set up two or three boundaries for comparison. + +### Set up the band visualization in the Data Explorer + +1. Click the **Data Explorer** icon in the navigation bar. + + {{< nav-icon "data-explorer" >}} + +2. Enter your query (see [Explore data with Flux and the Data Explorer](/influxdb/v2.5/visualize-data/explore-metrics/#explore-data-with-flux-and-the-data-explorer)). You must include the aggregate functions used to determine the Band Plot visualization boundaries in your query. +3. Select the **Band Plot** option from the visualization dropdown in the upper left, and then click **Customize**. +4. Under **Data**, select the following: + - For **X Column** and **Y Column**, select the columns to display for the x- and y- axes. + - For **Time Format**, select the timestamp format to display in the visualization. +5. Under **Aggregate Functions**, select a function to determine each boundary (column) for comparison (select two or three): + - In the **Upper Column** field, select a function for the upper boundary. + - In the **Main Column** field, select a function for the main boundary. + - In the **Lower Column** field, select a function for the lower boundary. +6. (Optional) Continue to customize your visualization, including options such as interpolation, color, hover dimension, and y-axis settings. For more information, see [Options](#options) and [Y Axis](#y-axis) below. + + **Tip:** If you do not see shaded boundaries in the **Band Plot** visualization, verify the query window period includes a sufficient number of data points for the selected aggregate function. By default, the window period is automatically set to ten seconds (`10s`). To adjust your window period, select **Custom**, and then enter a supported time unit (for example nanoseconds (`ns`), microseconds (`us`), milliseconds (`ms`), seconds (`s`), or hours (`h`). + +{{< img-hd src="/img/influxdb/2-0-visualizations-Band-example.png" alt="Band example" />}} + +### Set up the band plot visualization in the Script Editor + +1. Click the **Data Explorer** icon in the navigation bar. + + {{< nav-icon "data-explorer" >}} + +2. Click **Script Editor**. +3. Select the **Band Plot** option from the visualization dropdown in the upper left. +4. Create three aggregate functions: one for the main boundary, one for the upper boundary, and one for the lower boundary. 
The following example uses the [`mean()`](/{{< latest "flux" >}}/stdlib/universe/mean/), [`max()`](/{{< latest "flux" >}}/stdlib/universe/max/), and [`min()`](/{{< latest "flux" >}}/stdlib/universe/min/) functions:
+
+```js
+from(bucket: "bucket_1")
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+    |> filter(fn: (r) => r["_measurement"] == "cpu")
+    |> filter(fn: (r) => r["_field"] == "usage_system")
+    |> filter(fn: (r) => r["cpu"] == "cpu0" or r["cpu"] == "cpu1")
+    |> aggregateWindow(every: 15s, fn: mean, createEmpty: false)
+    |> yield(name: "mean")
+
+from(bucket: "bucket_1")
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+    |> filter(fn: (r) => r["_measurement"] == "cpu")
+    |> filter(fn: (r) => r["_field"] == "usage_system")
+    |> filter(fn: (r) => r["cpu"] == "cpu0" or r["cpu"] == "cpu1")
+    |> aggregateWindow(every: 15s, fn: max, createEmpty: false)
+    |> yield(name: "max")
+
+from(bucket: "bucket_1")
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+    |> filter(fn: (r) => r["_measurement"] == "cpu")
+    |> filter(fn: (r) => r["_field"] == "usage_system")
+    |> filter(fn: (r) => r["cpu"] == "cpu0" or r["cpu"] == "cpu1")
+    |> aggregateWindow(every: 15s, fn: min, createEmpty: false)
+    |> yield(name: "min")
+```
+
+5. (Optional) Customize the name of the yielded results for each function by editing the `name` parameter in the [`yield()`](/{{< latest "flux" >}}/stdlib/universe/yield/) function.
+For example, to change the name of the first function from `mean` to `Average`, modify the last line to the following:
+    ```js
+    |> yield(name: "Average")
+    ```
+6. Click **Customize** in the upper left.
+7. Under **Aggregate Functions**, enter the functions you created to determine each boundary (column) for comparison. If you changed the `yield` name for any of the functions above, enter the modified name here instead of the function name:
+    - In the **Upper Column Name** field, enter the result set to use for the upper boundary.
+    - In the **Main Column Name** field, enter the result set to use for the main boundary.
+    - In the **Lower Column Name** field, enter the result set to use for the lower boundary.
+8. (Optional) Continue to customize your visualization, including options such as interpolation, color, hover dimension, static legend, and y-axis settings. For more information, see [Options](#options) and [Y Axis](#y-axis) below.
+
+## Band behavior
+
+Like a line graph, the band visualization shows how values change over time. Additionally, it displays upper and lower "bands" for the measurements.
+
+For example, in the band chart above, the lines represent the mean `usage_system` values for the `cpu` measurement for `cpu0` and `cpu1`. The upper and lower limits of the bands are defined by the `max` and `min` aggregate functions, respectively.
+
+## Band controls
+
+To view **Band** controls, click **{{< icon "gear" >}} Customize** next to the visualization dropdown.
+
+###### Data
+
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+- **Time Format**: Select the time format. Options include:
+  {{< ui/timestamp-formats >}}
+
+###### Aggregate functions
+
+- **Upper Column**: Aggregate function to display for upper bounds of data.
+- **Main Column**: Aggregate function to display for main graph line.
+- **Lower Column**: Aggregate function to display for lower bounds of data.
+
+###### Options
+
+- **Interpolation**:
+  - **Linear**: Display a time series in a line graph.
+ - **Smooth**: Display a time series in a line graph with smooth point interpolation. + - **Step**: Display a time series in a staircase graph. +- **Line Colors**: Select a color scheme to use for your graph. +- **Hover Dimension**: Select the data to display in the tooltip when you hover over the graph: + - **auto** or **X-Axis**: Show all points with the same x value along the y-axis. + - **Y-Axis**: Show all points with the same y value along the x-axis. + - **X-Y Axis**: Show only the point being currently hovered over. + +###### X-Axis + +- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks: + - **Auto**: Select to automatically generate tick marks. + - **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of timestamp ticks to display. + - **Start Tick Marks At**: Enter the time, in RFC3339 format, to start displaying ticks. Use the **Date Picker** field to automatically generate an RFC3339 formatted timestamp for this field. + - **Tick Mark Interval**: Enter the number of milliseconds in between each timestamp tick. + +###### Y-Axis + +- **Y Axis Label**: Enter the label for the y-axis. +- **Y-Value Unit Prefix**: Select the prefix to add to the y-value: + - **None**: Select to add no prefix. + - **SI**: (default) Select to add an International System of Units (SI) or metric prefix. + - **Binary**: Select to add a binary multiple prefix. +- **Y Axis Prefix**: Enter the prefix to add to the y-value. +- **Y Axis Suffix**: Enter the suffix to add to the y-value. +- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks: + - **Auto**: Select to automatically generate tick marks. + - **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. +- **Y Axis Domain**: Select the method to generate the y-axis value range: + - **Auto**: Select to automatically determine the value range based on values in the data set. + - **Custom**: To customize the y-axis domain, manually specify the minimum y-axis value, maximum y-axis value, or range by including both. + - **Min**: Enter the minimum y-axis value. + - **Max**: Enter the maximum y-axis value. + +###### Legend + +- **Hover Legend**: + - **Hide**: Hide the legend that appears upon hover. + - **Show**: Show the legend upon hover. + - **Orientation**: Select the orientation of the legend: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. + - **Opacity**: Adjust the hover legend opacity using the slider. + - **Colorize Rows**: Select to display hover legend rows in colors. +- **Static Legend**: + - **Hide**: Hide the static legend. + - **Show**: Always show the static legend. + - **Orientation**: Select the orientation of the legend: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. + - **Opacity**: Adjust the static legend opacity using the slider. + - **Colorize Rows**: Select to display static legend rows in colors. + - **Displayed Value**: Select **Latest Y Axis** or **Latest X Axis** to determine whether the y or x axis appears on the legend. 
+  - **Height**: Adjust the height of the static legend using the slider.
diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/gauge.md b/content/influxdb/v2.5/visualize-data/visualization-types/gauge.md
new file mode 100644
index 000000000..699d7df89
--- /dev/null
+++ b/content/influxdb/v2.5/visualize-data/visualization-types/gauge.md
@@ -0,0 +1,66 @@
+---
+title: Gauge visualization
+list_title: Gauge
+list_image: /img/influxdb/2-0-visualizations-gauge-example.png
+description: >
+  The Gauge view displays the most recent value for a time series in a gauge.
+weight: 201
+menu:
+  influxdb_2_5:
+    name: Gauge
+    parent: Visualization types
+---
+
+The **Gauge** visualization displays the most recent value for a time series in a gauge.
+
+{{< img-hd src="/img/influxdb/2-0-visualizations-gauge-example-8.png" alt="Gauge example" />}}
+
+Select the **Gauge** option from the visualization dropdown in the upper left.
+
+## Gauge behavior
+The gauge visualization displays a single numeric data point within a defined spectrum (_default is 0-100_).
+It uses the latest point in the first table (or series) returned by the query.
+
+{{% note %}}
+#### Queries should return one table
+Flux does not guarantee the order in which tables are returned.
+If a query returns multiple tables (or series), the table order can change between query executions
+and result in the Gauge displaying inconsistent data.
+For consistent results, the Gauge query should return a single table.
+{{% /note %}}
+
+## Gauge Controls
+To view **Gauge** controls, click **{{< icon "gear" >}} Customize** next to
+the visualization dropdown.
+
+- **Value Prefix**: Prefix to add to the gauge value.
+- **Value Suffix**: Suffix to add to the gauge value.
+- **Axis Prefix**: Prefix to add to the gauge axis.
+- **Axis Suffix**: Suffix to add to the gauge axis.
+- **Decimal Places**: The number of decimal places to display for the gauge.
+  - **Auto** or **Custom**: Enable or disable auto-setting.
+
+###### Colorized Thresholds
+- **Add a Threshold**: Change the color of the gauge based on the current value.
+  - **Minimum**: Enter the minimum value at which the gauge should appear in the selected color.
+    Choose a color from the dropdown menu next to the value.
+  - **Maximum**: Enter the maximum value at which the gauge should appear in the selected color.
+    Choose a color from the dropdown menu next to the value.
+
+## Gauge examples
+Gauge visualizations are useful for showing the current value of a metric and displaying
+where it falls within a spectrum.
+
+### Steam pressure gauge
+The following example queries sensor data that tracks the pressure of steam pipes
+in a facility and displays it as a gauge.
+
+###### Query pressure data from a specific sensor
+```js
+from(bucket: "example-bucket")
+    |> range(start: -1m)
+    |> filter(fn: (r) => r._measurement == "steam-sensors" and r._field == "psi" and r.sensorID == "a211i")
+```
+
+###### Visualization options for pressure gauge
+{{< img-hd src="/img/influxdb/2-0-visualizations-gauge-pressure-8.png" alt="Pressure gauge example" />}}
diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/graph-single-stat.md b/content/influxdb/v2.5/visualize-data/visualization-types/graph-single-stat.md
new file mode 100644
index 000000000..60b923547
--- /dev/null
+++ b/content/influxdb/v2.5/visualize-data/visualization-types/graph-single-stat.md
@@ -0,0 +1,140 @@
+---
+title: Graph + Single Stat visualization
+list_title: Graph + Single Stat
+list_image: /img/influxdb/2-0-visualizations-line-graph-single-stat-example.png
+description: >
+  The Graph + Single Stat view displays the specified time series in a line graph
+  and overlays the single most recent value as a large numeric value.
+weight: 202
+menu:
+  influxdb_2_5:
+    name: Graph + Single Stat
+    parent: Visualization types
+related:
+  - /influxdb/v2.5/visualize-data/visualization-types/graph
+  - /influxdb/v2.5/visualize-data/visualization-types/single-stat
+---
+
+The **Graph + Single Stat** view displays the specified time series in a line graph
+and overlays the single most recent value as a large numeric value.
+
+{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-single-stat-example-8.png" alt="Line Graph + Single Stat example" />}}
+
+Select the **Graph + Single Stat** option from the visualization dropdown in the upper left.
+
+## Graph + Single Stat behavior
+The Graph visualization color codes each table (or series) in the queried data set.
+When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
+
+The Single Stat visualization displays a single numeric data point.
+It uses the latest point in the first table (or series) returned by the query.
+
+{{% note %}}
+#### Queries should return one table
+Flux does not guarantee the order in which tables are returned.
+If a query returns multiple tables (or series), the table order can change between query executions
+and result in the Single Stat visualization displaying inconsistent data.
+For consistent Single Stat results, the query should return a single table.
+{{% /note %}}
+
+## Graph + Single Stat Controls
+To view **Graph + Single Stat** controls, click **{{< icon "gear" >}} Customize** next to
+the visualization dropdown.
+
+###### Data
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+- **Time Format**: Select the time format. Options include:
+  {{< ui/timestamp-formats >}}
+
+###### Options
+- **Line Colors**: Select a color scheme to use for your graph.
+- **Hover Dimension**: Select the data to display in the tooltip when you hover over the graph:
+  - **auto** or **X Axis**: Show all points with the same x value along the y-axis.
+  - **Y Axis**: Show all points with the same y value along the x-axis.
+  - **X & Y Axis**: Show only the point currently being hovered over.
+- **Shade area below graph**: Shade in the area below the graph lines.
+
+###### X Axis
+- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks:
+  - **Auto**: Select to automatically generate tick marks.
+ - **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. + +###### Y Axis +- **Y Axis Label**: Label for the y-axis. +- **Y Value Unit Prefix**: + - **None**: No prefix. + - **SI**: International System of Units (SI) or metric prefix. + - **Binary**: Binary multiple prefix. +- **Y Axis Prefix**: Prefix to be added to y-value. +- **Y Axis Suffix**: Suffix to be added to y-value. +- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks: + - **Auto**: Select to automatically generate tick marks. + - **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. +- **Y Axis Domain**: The y-axis value range. + - **Auto**: Automatically determine the value range based on values in the data set. + - **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both. + - **Min**: Minimum y-axis value. + - **Max**: Maximum y-axis value. +- **Positioning**: + - **Overlaid**: Display graph lines overlaid on each other. + - **Stacked**: Display graph lines stacked on top of each other. + +###### Customize Single-Stat +- **Prefix**: Prefix to be added to the single stat. +- **Suffix**: Suffix to be added to the single stat. +- **Decimal Places**: The number of decimal places to display for the single stat. + - **Auto** or **Custom**: Enable or disable auto-setting. + +###### Colorized Thresholds +- **Base Color**: Select a base or background color from the selection list. +- **Add a Threshold**: Change the color of the single stat based on the current value. + - **Value is**: Enter the value at which the single stat should appear in the selected color. + Choose a color from the dropdown menu next to the value. +- **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds. + Choose **Background** for the background of the graph to change color based on the configured thresholds. + +###### Hover Legend +- **Display Hover Legend**: + - **Hide**: Hide the legend that appears upon hover. + - **Show**: Show the legend upon hover. + - **Orientation**: Select the orientation of the legend: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. + - **Opacity**: Adjust the hover legend opacity using the slider. + - **Colorize Rows**: Select to display hover legend rows in colors. + +###### Static Legend + - **Display Static Legend**: + - **Hide**: Hide the static legend. + - **Show**: Always show the static legend. + - **Orientation**: Select the orientation of the legend: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. + - **Opacity**: Adjust the static legend opacity using the slider. + - **Colorize Rows**: Select to display static legend rows in colors. + - **Displayed Value**: Select **Latest Y Axis** or **Latest X Axis** to determine whether the y or x axis appears on the legend. + - **Height**: Adjust the height of the static legend using the slider. 
+
+
+## Graph + Single Stat examples
+The primary use case for the Graph + Single Stat visualization is to show the current or latest
+value as well as historical values.
+
+### Show current value and historical values
+The following example shows the current percentage of memory used as well as memory usage over time:
+
+###### Query memory usage percentage
+```js
+from(bucket: "example-bucket")
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+    |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+```
+###### Memory usage percentage visualization with static legend
+{{< img-hd src="/img/influxdb/2-0-visualizations-graph-single-stat-mem-8.png" alt="Graph + Single Stat Memory Usage Example" />}}
diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/graph.md b/content/influxdb/v2.5/visualize-data/visualization-types/graph.md
new file mode 100644
index 000000000..09a9f7fd7
--- /dev/null
+++ b/content/influxdb/v2.5/visualize-data/visualization-types/graph.md
@@ -0,0 +1,127 @@
+---
+title: Graph visualization
+list_title: Graph
+list_image: /img/influxdb/2-0-visualizations-line-graph-example.png
+description: >
+  The Graph view lets you select from multiple graph types such as line graphs and bar graphs *(Coming)*.
+weight: 201
+menu:
+  influxdb_2_5:
+    name: Graph
+    parent: Visualization types
+---
+
+The Graph visualization provides several types of graphs, each configured through
+the [Graph controls](#graph-controls).
+
+{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-example-8.png" alt="Line Graph example" />}}
+
+Select the **Graph** option from the visualization dropdown in the upper left.
+
+## Graph behavior
+The Graph visualization color codes each table (or series) in the queried data set.
+When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
+
+When using a line graph, all points within a single table are connected.
+
+## Graph controls
+To view **Graph** controls, click **{{< icon "gear" >}} Customize** next to
+the visualization dropdown.
+
+###### Data
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+{{% cloud-only %}}
+
+- **Adaptive Zoom**: Enable this option to zoom in on graphs for a more granular view. Zooming in dynamically updates the window period to re-query the data.
+  {{% warn %}}
+  If you've hard-coded the window period, we don't recommend enabling this option.
+  {{% /warn %}}
+
+{{% /cloud-only %}}
+
+###### Options
+- **Time Format**: Select the time format. Options include:
+  {{< ui/timestamp-formats >}}
+- **Interpolation**: Select from the following options:
+  - **Linear**: Display a time series in a line graph.
+  - **Smooth**: Display a time series in a line graph with smooth point interpolation.
+  - **Step**: Display a time series in a staircase graph.
+
+- **Line Colors**: Select a color scheme to use for your graph.
+- **Shade Area Below Lines**: Shade in the area below the graph lines.
+- **Hover Dimension**: Select the data to display in the tooltip when you hover over the graph:
+  - **auto** or **X Axis**: Show all points with the same x value along the y-axis.
+  - **Y Axis**: Show all points with the same y value along the x-axis.
+  - **X & Y Axis**: Show only the point currently being hovered over.
+
+###### X Axis
+- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks:
+  - **Auto**: Select to automatically generate tick marks.
+  - **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following:
+    - **Total Tick Marks**: Enter the total number of ticks to display.
+    - **Start Tick Marks At**: Enter the value to start ticks at.
+    - **Tick Mark Interval**: Enter the interval in between each tick.
+
+###### Y Axis
+- **Y Axis Label**: Label for the y-axis.
+- **Y-Value Unit Prefix**:
+  - **None**: No prefix.
+  - **SI**: International System of Units (SI) or metric prefix.
+  - **Binary**: Binary multiple prefix.
+- **Y Axis Prefix**: Prefix to be added to y-value.
+- **Y Axis Suffix**: Suffix to be added to y-value.
+- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks:
+  - **Auto**: Select to automatically generate tick marks.
+  - **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following:
+    - **Total Tick Marks**: Enter the total number of ticks to display.
+    - **Start Tick Marks At**: Enter the value to start ticks at.
+    - **Tick Mark Interval**: Enter the interval in between each tick.
+- **Y Axis Domain**: The y-axis value range.
+  - **Auto**: Automatically determine the value range based on values in the data set.
+  - **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both.
+    - **Min**: Minimum y-axis value.
+    - **Max**: Maximum y-axis value.
+- **Positioning**:
+  - **Overlaid**: Display graph lines overlaid on each other.
+  - **Stacked**: Display graph lines stacked on top of each other.
+
+###### Hover Legend
+- **Display Hover Legend**:
+  - **Hide**: Hide the legend that appears upon hover.
+  - **Show**: Show the legend upon hover.
+  - **Orientation**: Select the orientation of the legend:
+    - **Horizontal**: Select to display the legend horizontally.
+    - **Vertical**: Select to display the legend vertically.
+  - **Opacity**: Adjust the hover legend opacity using the slider.
+  - **Colorize Rows**: Select to display hover legend rows in colors.
+
+###### Static Legend
+  - **Display Static Legend**:
+    - **Hide**: Hide the static legend.
+    - **Show**: Always show the static legend.
+    - **Orientation**: Select the orientation of the legend:
+      - **Horizontal**: Select to display the legend horizontally.
+      - **Vertical**: Select to display the legend vertically.
+    - **Opacity**: Adjust the static legend opacity using the slider.
+    - **Colorize Rows**: Select to display static legend rows in colors.
+    - **Displayed Value**: Select **Latest Y Axis** or **Latest X Axis** to determine whether the y or x axis appears on the legend.
+    - **Height**: Adjust the height of the static legend using the slider.
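+
+To produce a multi-series line graph like the ones shown in the examples below, you can use
+a query similar to the following sketch (the bucket, measurement, and field names are
+placeholders; adjust them for your data):
+
+```js
+// Return "usage_user" values for all CPUs, aggregated into one point per window period.
+// Each unique "cpu" tag value produces its own series (and its own line in the graph).
+from(bucket: "example-bucket")
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+    |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_user")
+    |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
+```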
+
+## Graph examples
+
+##### Graph with linear interpolation and static legend
+{{< img-hd src="/img/influxdb/2-0-visualizations-graph-linear-static.png" alt="Line Graph example" />}}
+
+##### Graph with smooth interpolation and hover legend
+{{< img-hd src="/img/influxdb/2-0-visualizations-graph-smooth-hover.png" alt="Step-Plot Graph example" />}}
+
+##### Graph with step interpolation and no visible legend
+{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-step-example-8.png" alt="Step-Plot Graph example" />}}
+
+
+
diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/heatmap.md b/content/influxdb/v2.5/visualize-data/visualization-types/heatmap.md
new file mode 100644
index 000000000..f14ed8056
--- /dev/null
+++ b/content/influxdb/v2.5/visualize-data/visualization-types/heatmap.md
@@ -0,0 +1,127 @@
+---
+title: Heatmap visualization
+list_title: Heatmap
+list_image: /img/influxdb/2-0-visualizations-heatmap-example.png
+description: >
+  A Heatmap displays the distribution of data on the x and y axes where color
+  represents different concentrations of data points.
+weight: 202
+menu:
+  influxdb_2_5:
+    name: Heatmap
+    parent: Visualization types
+related:
+  - /influxdb/v2.5/visualize-data/visualization-types/scatter
+---
+
+A **Heatmap** displays the distribution of data on the x and y axes where color
+represents different concentrations of data points.
+
+{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-example.png" alt="Heatmap example" />}}
+
+Select the **Heatmap** option from the visualization dropdown in the upper left.
+
+## Heatmap behavior
+Heatmaps divide data points into "bins" – segments of the visualization with upper
+and lower bounds for both [X and Y axes](#data).
+The [Bin Size option](#options) determines the bounds for each bin.
+The total number of points that fall within a bin determines its value and color.
+Warmer or brighter colors represent higher bin values or density of points within the bin.
+
+## Heatmap Controls
+To view **Heatmap** controls, click **{{< icon "gear" >}} Customize** next to
+the visualization dropdown.
+
+###### Data
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+- **Time Format**: Select the time format. Options include:
+  {{< ui/timestamp-formats >}}
+
+###### Options
+- **Color Scheme**: Select a color scheme to use for your heatmap.
+- **Bin Size**: Specify the size of each bin. Default is 10.
+
+###### X Axis
+- **X Axis Label**: Label for the x-axis.
+- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks:
+  - **Auto**: Select to automatically generate tick marks.
+  - **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following:
+    - **Total Tick Marks**: Enter the total number of ticks to display.
+    - **Start Tick Marks At**: Enter the value to start ticks at.
+    - **Tick Mark Interval**: Enter the interval in between each tick.
+- **X Axis Domain**: The x-axis value range.
+  - **Auto**: Automatically determine the value range based on values in the data set.
+  - **Custom**: Manually specify the minimum x-axis value, maximum x-axis value, or range by including both.
+    - **Min**: Minimum x-axis value.
+    - **Max**: Maximum x-axis value.
+
+###### Y Axis
+- **Y Axis Label**: Label for the y-axis.
+- **Y Tick Prefix**: Prefix to be added to y-value.
+- **Y Tick Suffix**: Suffix to be added to y-value.
+- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks: + - **Auto**: Select to automatically generate tick marks. + - **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. +- **Y Axis Domain**: The y-axis value range. + - **Auto**: Automatically determine the value range based on values in the data set. + - **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both. + - **Min**: Minimum y-axis value. + - **Max**: Maximum y-axis value. + +###### Hover Legend +- **Orientation**: Select the orientation of the legend that appears upon hover: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. +- **Opacity**: Adjust the legend opacity using the slider. +- **Colorize Rows**: Select to display legend rows in colors. + +## Heatmap examples + +### Cross-measurement correlation +The following example explores possible correlation between CPU and Memory usage. +It uses data collected with the Telegraf [Mem](/{{< latest "telegraf" >}}/plugins//#mem) +and [CPU](/{{< latest "telegraf" >}}/plugins//#cpu) input plugins. + +###### Join CPU and memory usage +The following query joins CPU and memory usage on `_time`. +Each row in the output table contains `_value_cpu` and `_value_mem` columns. + +```js +cpu = from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + +mem = from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + +join(tables: {cpu: cpu, mem: mem}, on: ["_time"], method: "inner") +``` + +###### Use a heatmap to visualize correlation +In the Heatmap visualization controls, `_value_cpu` is selected as the [X Column](#data) +and `_value_mem` is selected as the [Y Column](#data). +The domain for each axis is also customized to account for the scale difference +between column values. + +{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-correlation.png" alt="Heatmap correlation example" />}} + + +## Important notes + +### Differences between a heatmap and a scatter plot +Heatmaps and [Scatter plots](/influxdb/v2.5/visualize-data/visualization-types/scatter/) +both visualize the distribution of data points on X and Y axes. +However, in certain cases, heatmaps provide better visibility into point density. + +For example, the dashboard cells below visualize the same query results: + +{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-vs-scatter.png" alt="Heatmap vs Scatter plot" />}} + +The heatmap indicates isolated high point density, which isn't visible in the scatter plot. +In the scatter plot visualization, points that share the same X and Y coordinates +appear as a single point. 
diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/histogram.md b/content/influxdb/v2.5/visualize-data/visualization-types/histogram.md new file mode 100644 index 000000000..719789a1d --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/histogram.md @@ -0,0 +1,84 @@ +--- +title: Histogram visualization +list_title: Histogram +list_image: /img/influxdb/2-0-visualizations-histogram-example.png +description: > + A histogram is a way to view the distribution of data. + The y-axis is dedicated to count, and the x-axis is divided into bins. +weight: 202 +menu: + influxdb_2_5: + name: Histogram + parent: Visualization types +--- + +A histogram is a way to view the distribution of data. +The y-axis is dedicated to count, and the X-axis is divided into bins. + +{{< img-hd src="/img/influxdb/2-0-visualizations-histogram-example.png" alt="Histogram example" />}} + +Select the **Histogram** option from the visualization dropdown in the upper left. + +## Histogram behavior +The Histogram visualization is a bar graph that displays the number of data points +that fall within "bins" – segments of the X axis with upper and lower bounds. +Bin thresholds are determined by dividing the width of the X axis by the number +of bins set using the [Bins option](#options). +Data within bins can be further grouped or segmented by selecting columns in the +[Group By option](#options). + +{{% note %}} +The Histogram visualization automatically bins, segments, and counts data. +To work properly, query results **should not** be structured as histogram data. +{{% /note %}} + +## Histogram Controls +To view **Histogram** controls, click **{{< icon "gear" >}} Customize** next to +the visualization dropdown. + +###### Data +- **X Column**: The column to select data from. +- **Group By**: The column to group by. + +###### Options +- **Color Scheme**: Select a color scheme to use for your graph. +- **Positioning**: Select **Stacked** to stack groups in a bin on top of each other. + Select **Overlaid** to overlay groups in each bin. +- **Bins**: Enter a number of bins to divide data into or select Auto to automatically + calculate the number of bins. + - **Auto** or **Custom**: Enable or disable auto-setting. + +###### X Axis +- **X Axis Label**: Label for the x-axis. +- **X Axis Domain**: The x-axis value range. + - **Auto**: Automatically determine the value range based on values in the data set. + - **Custom**: Manually specify the value range of the x-axis. + - **Min**: Minimum x-axis value. + - **Max**: Maximum x-axis value. + +###### Hover Legend +- **Orientation**: Select the orientation of the legend that appears upon hover: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. +- **Opacity**: Adjust the legend opacity using the slider. +- **Colorize Rows**: Select to display legend rows in colors. + +## Histogram examples + +### View error counts by severity over time +The following example uses the Histogram visualization to show the number of errors +"binned" by time and segmented by severity. 
+_It utilizes data from the [Telegraf Syslog plugin](/{{< latest "telegraf" >}}/plugins//#syslog)._ + +##### Query for errors by severity code +```js +from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "syslog" and r._field == "severity_code") +``` + +##### Histogram settings +In the Histogram visualization options, select `_time` as the [X Column](#data) +and `severity` as the [Group By](#data) option: + +{{< img-hd src="/img/influxdb/2-0-visualizations-histogram-errors.png" alt="Errors histogram" />}} diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/mosaic.md b/content/influxdb/v2.5/visualize-data/visualization-types/mosaic.md new file mode 100644 index 000000000..2fba70905 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/mosaic.md @@ -0,0 +1,76 @@ +--- +title: Mosaic visualization +list_title: Mosaic +list_image: /img/influxdb/2-0-visualizations-mosaic-example.png +description: > + The Mosaic visualization displays state changes in your time series data. + This visualization type is useful when you want to show changes in string-based states over time. +weight: 202 +menu: + influxdb_2_5: + name: Mosaic + parent: Visualization types +--- + +The **Mosaic** visualization displays state changes in your time series data. +This visualization type is useful when you want to show changes in string-based states over time. + +{{< img-hd src="/img/influxdb/2-0-visualizations-mosaic-example.png" alt="Mosaic data visualization" />}} + +Select the **Mosaic** option from the visualization dropdown in the upper left. + +## Mosaic behavior +The mosaic visualization displays colored tiles based on string values in a specified column. +Each unique string value is represented by a different color. + +## Mosaic controls +To view **Mosaic** controls, click **{{< icon "gear" >}} Customize** next to the visualization dropdown. + +###### Data +- **Fill Column**: Select a column to fill in the mosaic tiles. +- **X Column**: Select a column to display on the x-axis. +- **Y Column**: Select one or more columns to display on the y-axis. +- **Time Format**: Select the time format. Options include: + {{< ui/timestamp-formats >}} + +###### Options +- **Color Scheme**: Select a color scheme to use for your graph. + +###### X Axis +- **X Axis Label**: Enter a label for the x-axis. +- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks: + - **Auto**: Select to automatically generate tick marks. + - **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of timestamp ticks to display. + - **Start Tick Marks At**: Enter the time, in RFC3339 format, to start displaying ticks. Use the **Date Picker** field to automatically generate an RFC3339 formatted timestamp for this field. + - **Tick Mark Interval**: Enter the number of milliseconds in between each timestamp tick. + +###### Y Axis +- **Y Axis Label**: Enter a label for the y-axis. +- **Y Label Separator**: If there's more than one column on the y-axis, enter a delimiter to separate the label, such as a comma or space. If there's no separator specified, the labels are a continuous string of all y columns. +- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks: + - **Auto**: Select to automatically generate tick marks. 
+ - **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. + +###### Hover Legend +- **Orientation**: Select the orientation of the legend that appears upon hover: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. +- **Opacity**: Adjust the legend opacity using the slider. +- **Colorize Rows**: Select to display legend rows in colors. + +## Example query +The following query uses the [NOAA water sample data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data) +to display changes in water levels over time. +Use `level description` as the **Fill Column** in the [visualization controls](#data). + +```js +from(bucket: "noaa") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "h2o_feet") + |> filter(fn: (r) => r._field == "level description") + |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false) +``` diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/scatter.md b/content/influxdb/v2.5/visualize-data/visualization-types/scatter.md new file mode 100644 index 000000000..0f9e02668 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/scatter.md @@ -0,0 +1,124 @@ +--- +title: Scatter visualization +list_title: Scatter +list_image: /img/influxdb/2-0-visualizations-scatter-example.png +description: > + The Scatter view uses a scatter plot to display time series data. +weight: 202 +menu: + influxdb_2_5: + name: Scatter + parent: Visualization types +related: + - /influxdb/v2.5/visualize-data/visualization-types/heatmap +--- + +The **Scatter** view uses a scatter plot to display time series data. + +{{< img-hd src="/img/influxdb/2-0-visualizations-scatter-example.png" alt="Scatter plot example" />}} + +Select the **Scatter** option from the visualization dropdown in the upper left. + +## Scatter behavior +The scatter visualization maps each data point to X and Y coordinates. +X and Y axes are specified with the [X Column](#data) and [Y Column](#data) visualization options. +Each unique series is differentiated using fill colors and symbols. +Use the [Symbol Column](#data) and [Fill Column](#data) options to select columns +used to differentiate points in the visualization. + +## Scatter controls +To view **Scatter** controls, click **{{< icon "gear" >}} Customize** next to +the visualization dropdown. + +###### Data +- **Symbol Column**: Define a column containing values that should be differentiated with symbols. +- **Fill Column**: Define a column containing values that should be differentiated with fill color. +- **X Column**: Select a column to display on the x-axis. +- **Y Column**: Select a column to display on the y-axis. +- **Time Format**: Select the time format. Options include: + {{< ui/timestamp-formats >}} + +###### Options +- **Color Scheme**: Select a color scheme to use for your scatter plot. + +###### X Axis +- **X Axis Label**: Label for the x-axis. +- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks: + - **Auto**: Select to automatically generate tick marks. 
+ - **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. +- **X Axis Domain**: The x-axis value range. + - **Auto**: Automatically determine the value range based on values in the data set. + - **Custom**: Manually specify the minimum x-axis value, maximum x-axis value, or range by including both. + - **Min**: Minimum x-axis value. + - **Max**: Maximum x-axis value. + +###### Y Axis +- **Y Axis Label**: Label for the y-axis. +- **Y Tick Prefix**: Prefix to be added to y-value. +- **Y Tick Suffix**: Suffix to be added to y-value. +- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks: + - **Auto**: Select to automatically generate tick marks. + - **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following: + - **Total Tick Marks**: Enter the total number of ticks to display. + - **Start Tick Marks At**: Enter the value to start ticks at. + - **Tick Mark Interval**: Enter the interval in between each tick. +- **Y Axis Domain**: The y-axis value range. + - **Auto**: Automatically determine the value range based on values in the data set. + - **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both. + - **Min**: Minimum y-axis value. + - **Max**: Maximum y-axis value. + +###### Hover Legend +- **Orientation**: Select the orientation of the legend that appears upon hover: + - **Horizontal**: Select to display the legend horizontally. + - **Vertical**: Select to display the legend vertically. +- **Opacity**: Adjust the legend opacity using the slider. +- **Colorize Rows**: Select to display legend rows in colors. + +## Scatter examples + +### Cross-measurement correlation +The following example explores possible correlation between CPU and Memory usage. +It uses data collected with the Telegraf [Mem](/{{< latest "telegraf" >}}/plugins//#mem) +and [CPU](/{{< latest "telegraf" >}}/plugins//#cpu) input plugins. + +###### Query CPU and memory usage +The following query creates a union of CPU and memory usage. +It scales the CPU usage metric to better align with baseline memory usage. + +```js +cpu = from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system" and r.cpu == "cpu-total") + // Scale CPU usage + |> map(fn: (r) => ({r with _value: r._value + 60.0, _time: r._time})) + +mem = from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + +union(tables: [cpu, mem]) +``` + +###### Use a scatter plot to visualize correlation +In the Scatter visualization controls, points are differentiated based on their group keys. + +{{< img-hd src="/img/influxdb/2-0-visualizations-scatter-correlation.png" alt="Heatmap correlation example" />}} + +## Important notes + +### Differences between a scatter plot and a heatmap +Scatter plots and [Heatmaps](/influxdb/v2.5/visualize-data/visualization-types/heatmap/) +both visualize the distribution of data points on X and Y axes. +However, in certain cases, scatterplots can "hide" points if they share the same X and Y coordinates. 
+ +For example, the dashboard cells below visualize the same query results: + +{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-vs-scatter.png" alt="Heatmap vs Scatter plot" />}} + +The heatmap indicates isolated high point density, which isn't visible in the scatter plot. +In the scatter plot visualization, points that share the same X and Y coordinates +appear as a single point. diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/single-stat.md b/content/influxdb/v2.5/visualize-data/visualization-types/single-stat.md new file mode 100644 index 000000000..ad212fcbd --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/single-stat.md @@ -0,0 +1,62 @@ +--- +title: Single Stat visualization +list_title: Single stat +list_image: /img/influxdb/2-0-visualizations-single-stat-example.png +description: > + The Single Stat view displays the most recent value of the specified time series as a numerical value. +weight: 202 +menu: + influxdb_2_5: + name: Single Stat + parent: Visualization types +--- + +The **Single Stat** view displays the most recent value of the specified time series as a numerical value. + +{{< img-hd src="/img/influxdb/2-0-visualizations-single-stat-example-8.png" alt="Single stat example" />}} + +Select the **Single Stat** option from the visualization dropdown in the upper left. + +## Single Stat behavior +The Single Stat visualization displays a single numeric data point. +It uses the latest point in the first table (or series) returned by the query. + +{{% note %}} +#### Queries should return one table +Flux does not guarantee the order in which tables are returned. +If a query returns multiple tables (or series), the table order can change between query executions +and result in the Single Stat visualization displaying inconsistent data. +For consistent results, the Single Stat query should return a single table. +{{% /note %}} + +## Single Stat Controls +To view **Single Stat** controls, click **{{< icon "gear" >}} Customize** next to +the visualization dropdown. + +- **Prefix**: Prefix to be added to the single stat. +- **Suffix**: Suffix to be added to the single stat. +- **Decimal Places**: The number of decimal places to display for the single stat. + - **Auto** or **Custom**: Enable or disable auto-setting. + +###### Colorized Thresholds +- **Base Color**: Select a base or background color from the selection list. +- **Add a Threshold**: Change the color of the single stat based on the current value. + - **Value is**: Enter the value at which the single stat should appear in the selected color. + Choose a color from the dropdown menu next to the value. +- **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds. + Choose **Background** for the background of the graph to change color based on the configured thresholds. 
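+
+As noted in [Single Stat behavior](#single-stat-behavior), the query should return a single
+table for consistent results. One way to do this is to filter the query down to a single
+series, as in the following sketch (the bucket, measurement, field, and tag values are
+placeholders; adjust them for your data):
+
+```js
+// Filtering on the "cpu" tag leaves one series (one table),
+// so the Single Stat value is always taken from the same series.
+from(bucket: "example-bucket")
+    |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+    |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system")
+    |> filter(fn: (r) => r.cpu == "cpu-total")
+```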
+ +## Single Stat examples + +### Show human-readable current value +The following example shows the current memory usage displayed has a human-readable percentage: + +###### Query memory usage percentage +```js +from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") +``` + +###### Memory usage as a single stat +{{< img-hd src="/img/influxdb/2-0-visualizations-single-stat-example-8.png" alt="Graph + Single Stat Memory Usage Example" />}} diff --git a/content/influxdb/v2.5/visualize-data/visualization-types/table.md b/content/influxdb/v2.5/visualize-data/visualization-types/table.md new file mode 100644 index 000000000..5830ae360 --- /dev/null +++ b/content/influxdb/v2.5/visualize-data/visualization-types/table.md @@ -0,0 +1,75 @@ +--- +title: Table visualization +list_title: Table +list_image: /img/influxdb/2-0-visualizations-table-example.png +description: > + The Table option displays the results of queries in a tabular view, which is + sometimes easier to analyze than graph views of data. +weight: 202 +menu: + influxdb_2_5: + name: Table + parent: Visualization types +--- + +The **Table** option displays the results of queries in a tabular view, which is +sometimes easier to analyze than graph views of data. + +{{< img-hd src="/img/influxdb/2-0-visualizations-table-example.png" alt="Table example" />}} + +Select the **Table** option from the visualization dropdown in the upper left. + +## Table behavior +The table visualization renders queried data in structured, easy-to-read tables. +Columns and rows match those in the query output. +If query results contain multiple tables, only one table is shown at a time. +Select other output tables in the far left column of the table visualization. +Tables are identified by their [group key](/{{< latest "flux" >}}/get-started/data-model/#group-key). + +## Table Controls +To view **Table** controls, click **{{< icon "gear" >}} Customize** next to +the visualization dropdown. + +###### Formatting +- **Default Sort Field**: Select the default sort field. Default is **time**. +- **Time Format**: Select the time format. Options include: + {{< ui/timestamp-formats >}} + +- **Decimal Places**: Enter the number of decimal places. Default (empty field) is **unlimited**. + - **Auto** or **Custom**: Enable or disable auto-setting. + +###### Colorized Thresholds +- **Base Color**: Select a base or background color from the selection list. +- **Add a Threshold**: Change the color of the table based on the current value. + - **Value is**: Enter the value at which the table should appear in the selected color. + Choose a color from the dropdown menu next to the value. + +###### Column Settings +- **First Column**: Toggle to **Fixed** to lock the first column so that the listings are always visible. + Threshold settings do not apply in the first column when locked. +- **Table Columns**: + - Enter a new name to rename any of the columns. + - Click the eye icon next to a column to hide it. + - [additional]: Enter name for each additional column. + - Change the order of the columns by dragging to the desired position. + +## Table examples +Tables are helpful when displaying many human-readable metrics in a dashboard +such as cluster statistics or log messages. + +### Human-readable cluster metrics +The following example queries the latest reported memory usage from a cluster of servers. 
+ +###### Query the latest memory usage from each host +```js +from(bucket: "example-bucket") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent") + |> group(columns: ["host"]) + |> last() + |> group() + |> keep(columns: ["_value", "host"]) +``` + +###### Cluster metrics in a table +{{< img-hd src="/img/influxdb/2-0-visualizations-table-human-readable.png" alt="Human readable metrics in a table" />}} diff --git a/content/influxdb/v2.5/write-data/_index.md b/content/influxdb/v2.5/write-data/_index.md new file mode 100644 index 000000000..a5f10bab3 --- /dev/null +++ b/content/influxdb/v2.5/write-data/_index.md @@ -0,0 +1,37 @@ +--- +title: Write data to InfluxDB +list_title: Write data +description: > + Collect and write time series data to InfluxDB Cloud and InfluxDB OSS. +weight: 4 +aliases: + - /influxdb/v2.5/write-data/quick-start/ + - /influxdb/v2.5/write-data/sample-data/demo-data/ +menu: + influxdb_2_5: + name: Write data +influxdb/v2.5/tags: [write, line protocol] +related: + - /influxdb/v2.5/write-data/no-code/use-telegraf/ + - /influxdb/v2.5/api/#tag/Write, InfluxDB API /write endpoint + - /influxdb/v2.5/reference/syntax/line-protocol + - /influxdb/v2.5/reference/syntax/annotated-csv + - /influxdb/v2.5/reference/cli/influx/write + - /influxdb/v2.5/migrate-data/ + - /resources/videos/ingest-data/, How to Ingest Data in InfluxDB (Video) +--- + +1. Learn the [best practices](/influxdb/v2.5/write-data/best-practices/) for writing data. +2. Discover how to write data [without coding](/influxdb/v2.5/write-data/no-code/), by [loading data source in the UI](/influxdb/v2.5/write-data/no-code/load-data/), or using [developer tools](/influxdb/v2.5/write-data/developer-tools/). +3. Do any of the following: + - [Troubleshoot the most common issues writing data](/influxdb/v2.5/write-data/troubleshoot/) + - [Delete data you no longer need](/influxdb/v2.5/write-data/delete-data/) + - [Query and explore data](/influxdb/v2.5/query-data/) + - [Process data](/influxdb/v2.5/process-data/) + - [Visualize data](/influxdb/v2.5/visualize-data/) + - [Migrate data](/influxdb/v2.5/migrate-data/) + - [Monitor and alert](/influxdb/v2.5/monitor-alert/) + +The following video discusses different ways to write data to InfluxDB: + +{{< youtube xYnFVvRTlkQ >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/write-data/best-practices/_index.md b/content/influxdb/v2.5/write-data/best-practices/_index.md new file mode 100644 index 000000000..413f05480 --- /dev/null +++ b/content/influxdb/v2.5/write-data/best-practices/_index.md @@ -0,0 +1,17 @@ +--- +title: Best practices for writing data +seotitle: Best practices for writing data to InfluxDB +description: > + Learn about the recommendations and best practices for writing data to InfluxDB. +weight: 105 +menu: + influxdb_2_5: + name: Best practices + identifier: write-best-practices + parent: Write data +--- + +The following articles provide recommendations and best practices for writing +data to InfluxDB. 
+ +{{< children >}} diff --git a/content/influxdb/v2.5/write-data/best-practices/duplicate-points.md b/content/influxdb/v2.5/write-data/best-practices/duplicate-points.md new file mode 100644 index 000000000..b164cbace --- /dev/null +++ b/content/influxdb/v2.5/write-data/best-practices/duplicate-points.md @@ -0,0 +1,131 @@ +--- +title: Handle duplicate data points +seotitle: Handle duplicate data points when writing to InfluxDB +description: > + InfluxDB identifies unique data points by their measurement, tag set, and timestamp. + This article discusses methods for preserving data from two points with a common + measurement, tag set, and timestamp but a different field set. +weight: 204 +menu: + influxdb_2_5: + name: Handle duplicate points + parent: write-best-practices +influxdb/v2.5/tags: [best practices, write] +--- + +InfluxDB identifies unique data points by their measurement, tag set, and timestamp +(each a part of [Line protocol](/influxdb/v2.5/reference/syntax/line-protocol) used to write data to InfluxDB). + +```txt +web,host=host2,region=us_west firstByte=15.0 1559260800000000000 +--- ------------------------- ------------------- + | | | +Measurement Tag set Timestamp +``` + +## Duplicate data points +For points that have the same measurement name, tag set, and timestamp, +InfluxDB creates a union of the old and new field sets. +For any matching field keys, InfluxDB uses the field value of the new point. +For example: + +```sh +# Existing data point +web,host=host2,region=us_west firstByte=24.0,dnsLookup=7.0 1559260800000000000 + +# New data point +web,host=host2,region=us_west firstByte=15.0 1559260800000000000 +``` + +After you submit the new data point, InfluxDB overwrites `firstByte` with the new +field value and leaves the field `dnsLookup` alone: + +```sh +# Resulting data point +web,host=host2,region=us_west firstByte=15.0,dnsLookup=7.0 1559260800000000000 +``` + +```sh +from(bucket: "example-bucket") + |> range(start: 2019-05-31T00:00:00Z, stop: 2019-05-31T12:00:00Z) + |> filter(fn: (r) => r._measurement == "web") + +Table: keys: [_measurement, host, region] + _time _measurement host region dnsLookup firstByte +-------------------- ------------ ----- ------- --------- --------- +2019-05-31T00:00:00Z web host2 us_west 7 15 +``` + +## Preserve duplicate points +To preserve both old and new field values in duplicate points, use one of the following strategies: + +- [Add an arbitrary tag](#add-an-arbitrary-tag) +- [Increment the timestamp](#increment-the-timestamp) + +### Add an arbitrary tag +Add an arbitrary tag with unique values so InfluxDB reads the duplicate points as unique. + +For example, add a `uniq` tag to each data point: + +```sh +# Existing point +web,host=host2,region=us_west,uniq=1 firstByte=24.0,dnsLookup=7.0 1559260800000000000 + +# New point +web,host=host2,region=us_west,uniq=2 firstByte=15.0 1559260800000000000 +``` + +{{% note %}} +It is not necessary to retroactively add the unique tag to the existing data point. +Tag sets are evaluated as a whole. +The arbitrary `uniq` tag on the new point allows InfluxDB to recognize it as a unique point. +However, this causes the schema of the two points to differ and may lead to challenges when querying the data. 
+{{% /note %}} + +After writing the new point to InfluxDB: + +```sh +from(bucket: "example-bucket") + |> range(start: 2019-05-31T00:00:00Z, stop: 2019-05-31T12:00:00Z) + |> filter(fn: (r) => r._measurement == "web") + +Table: keys: [_measurement, host, region, uniq] + _time _measurement host region uniq firstByte dnsLookup +-------------------- ------------ ----- ------- ---- --------- --------- +2019-05-31T00:00:00Z web host2 us_west 1 24 7 + +Table: keys: [_measurement, host, region, uniq] + _time _measurement host region uniq firstByte +-------------------- ------------ ----- ------- ---- --------- +2019-05-31T00:00:00Z web host2 us_west 2 15 +``` + +### Increment the timestamp +Increment the timestamp by a nanosecond to enforce the uniqueness of each point. + +```sh +# Old data point +web,host=host2,region=us_west firstByte=24.0,dnsLookup=7.0 1559260800000000000 + +# New data point +web,host=host2,region=us_west firstByte=15.0 1559260800000000001 +``` + +After writing the new point to InfluxDB: + +```sh +from(bucket: "example-bucket") + |> range(start: 2019-05-31T00:00:00Z, stop: 2019-05-31T12:00:00Z) + |> filter(fn: (r) => r._measurement == "web") + +Table: keys: [_measurement, host, region] + _time _measurement host region firstByte dnsLookup +------------------------------ ------------ ----- ------- --------- --------- +2019-05-31T00:00:00.000000000Z web host2 us_west 24 7 +2019-05-31T00:00:00.000000001Z web host2 us_west 15 +``` + +{{% note %}} +The output of examples queries in this article has been modified to clearly show +the different approaches and results for handling duplicate data. +{{% /note %}} diff --git a/content/influxdb/v2.5/write-data/best-practices/optimize-writes.md b/content/influxdb/v2.5/write-data/best-practices/optimize-writes.md new file mode 100644 index 000000000..c89b78846 --- /dev/null +++ b/content/influxdb/v2.5/write-data/best-practices/optimize-writes.md @@ -0,0 +1,132 @@ +--- +title: Optimize writes to InfluxDB +description: > + Simple tips to optimize performance and system overhead when writing data to InfluxDB. +weight: 203 +menu: + influxdb_2_5: + parent: write-best-practices +influxdb/v2.5/tags: [best practices, write] +--- + +Use these tips to optimize performance and system overhead when writing data to InfluxDB. + +- [Batch writes](#batch-writes) +- [Sort tags by key](#sort-tags-by-key) +- [Use the coarsest time precision possible](#use-the-coarsest-time-precision-possible) +- [Use gzip compression](#use-gzip-compression) +- [Synchronize hosts with NTP](#synchronize-hosts-with-ntp) +- [Write multiple data points in one request](#write-multiple-data-points-in-one-request) + +{{% note %}} +The following tools write to InfluxDB and employ _most_ write optimizations by default: + +- [Telegraf](/influxdb/v2.5/write-data/no-code/use-telegraf/) +- [InfluxDB client libraries](/influxdb/v2.5/api-guide/client-libraries/) +- [InfluxDB scrapers](/influxdb/v2.5/write-data/no-code/scrape-data/) +{{% /note %}} + +## Batch writes + +Write data in batches to minimize network overhead when writing data to InfluxDB. + +{{% note %}} +The optimal batch size is 5000 lines of line protocol. +{{% /note %}} + +## Sort tags by key + +Before writing data points to InfluxDB, sort tags by key in lexicographic order. 
+_Verify sort results match results from the [Go `bytes.Compare` function](http://golang.org/pkg/bytes/#Compare)._ + +```sh +# Line protocol example with unsorted tags +measurement,tagC=therefore,tagE=am,tagA=i,tagD=i,tagB=think fieldKey=fieldValue 1562020262 + +# Optimized line protocol example with tags sorted by key +measurement,tagA=i,tagB=think,tagC=therefore,tagD=i,tagE=am fieldKey=fieldValue 1562020262 +``` + +## Use the coarsest time precision possible + +By default, InfluxDB writes data in nanosecond precision. +However if your data isn't collected in nanoseconds, there is no need to write at that precision. +For better performance, use the coarsest precision possible for timestamps. + +_Specify timestamp precision when [writing to InfluxDB](/influxdb/v2.5/write-data/#timestamp-precision)._ + +## Use gzip compression + +Use gzip compression to speed up writes to InfluxDB and reduce network bandwidth. +Benchmarks have shown up to a 5x speed improvement when data is compressed. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Telegraf](#) +[Client libraries](#) +[InfluxDB API](#) +{{% /tabs %}} +{{% tab-content %}} + +### Enable gzip compression in Telegraf + +In the `influxdb_v2` output plugin configuration in your `telegraf.conf`, set the +`content_encoding` option to `gzip`: + +```toml +[[outputs.influxdb_v2]] + urls = ["http://localhost:8086"] + # ... + content_encoding = "gzip" +``` +{{% /tab-content %}} +{{% tab-content %}} + +### Enable gzip compression in InfluxDB client libraries + +Each [InfluxDB client library](/influxdb/v2.5/api-guide/client-libraries/) provides +options for compressing write requests or enforces compression by default. +The method for enabling compression is different for each library. +For specific instructions, see the [InfluxDB client libraries documentation](/influxdb/v2.5/api-guide/client-libraries/). +{{% /tab-content %}} +{{% tab-content %}} + +### Use gzip compression with the InfluxDB API + +When using the InfluxDB API `/api/v2/write` endpoint to write data, compress the data with `gzip` and set the `Content-Encoding` +header to `gzip`. + +```sh +{{% get-shared-text "api/v2.0/write/write-compress.sh" %}} +``` +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Synchronize hosts with NTP + +Use the Network Time Protocol (NTP) to synchronize time between hosts. +If a timestamp isn't included in line protocol, InfluxDB uses its host's local +time (in UTC) to assign timestamps to each point. +If a host's clocks isn't synchronized with NTP, timestamps may be inaccurate. + +## Write multiple data points in one request + +To write multiple lines in one request, each line of line protocol must be delimited by a new line (`\n`). + +## Rate limiting + +Use the [`influx write`](/influxdb/v2.5/reference/cli/influx/write/) `--rate-limit` flag to control the rate of writes. +Use one of the following string formats to specify the rate limit: + +- `COUNT(B|kB|MB)`, or +- `COUNT(B|kB|MB)/TIME(s|sec|m|min)` + +where `COUNT` is a decimal number and `TIME` is a positive whole number. +Spaces in the value are ignored. +For example: "5MB / 5min" can be also expressed as `17476.266666667Bs`, `1MB/1min`, `1MB/min`, `1MBmin` or `1MBm`. +If the rate limit format is invalid, `influx write` prints out the format and an exact regular expression. +The `--rate-limit` flag can be also used with [`influx write dryrun`](/influxdb/v2.5/reference/cli/influx/write/dryrun/). + +{{% cloud %}} +By default, the free tier rate limit in {{< cloud-name "short" >}} is `1MB/min`. 
+{{% /cloud %}} diff --git a/content/influxdb/v2.5/write-data/best-practices/resolve-high-cardinality.md b/content/influxdb/v2.5/write-data/best-practices/resolve-high-cardinality.md new file mode 100644 index 000000000..736ef0e4d --- /dev/null +++ b/content/influxdb/v2.5/write-data/best-practices/resolve-high-cardinality.md @@ -0,0 +1,133 @@ +--- +title: Resolve high series cardinality +description: > + Reduce high series cardinality in InfluxDB. If reads and writes to InfluxDB have started to slow down, you may have high cardinality. Find the source of high cardinality and adjust your schema to resolve high cardinality issues. +menu: + influxdb_2_5: + name: Resolve high cardinality + weight: 202 + parent: write-best-practices +--- + +If reads and writes to InfluxDB have started to slow down, high [series cardinality](/influxdb/v2.5/reference/glossary/#series-cardinality) (too many series) may be causing memory issues. {{% cloud-only %}}Cardinality can also cause writes to fail if it exceeds your [plan’s adjustable service quota](/influxdb/cloud/account-management/limits/).{{% /cloud-only %}} + +Take steps to understand and resolve high series cardinality. + +1. [Learn the causes of high cardinality](#learn-the-causes-of-high-series-cardinality) +2. [Measure series cardinality](#measure-series-cardinality) +3. [Resolve high cardinality](#resolve-high-cardinality) + +## Learn the causes of high series cardinality + +{{% oss-only %}} + + InfluxDB indexes the following data elements to speed up reads: + - [measurement](/influxdb/v2.5/reference/glossary/#measurement) + - [tags](/influxdb/v2.5/reference/glossary/#tag) + +{{% /oss-only %}} +{{% cloud-only %}} + + InfluxDB indexes the following data elements to speed up reads: + - [measurement](/influxdb/v2.5/reference/glossary/#measurement) + - [tags](/influxdb/v2.5/reference/glossary/#tag) + - [field keys](/influxdb/cloud/reference/glossary/#field-key) + +{{% /cloud-only %}} + +Each unique set of indexed data elements forms a [series key](/influxdb/v2.5/reference/glossary/#series-key). +[Tags](/influxdb/v2.5/reference/glossary/#tag) containing highly variable information like unique IDs, hashes, and random strings lead to a large number of [series](/influxdb/v2.5/reference/glossary/#series), also known as high [series cardinality](/influxdb/v2.5/reference/glossary/#series-cardinality). +High series cardinality is a primary driver of high memory usage for many database workloads. + +## Measure series cardinality + +Use the following to measure series cardinality of your buckets: +- [`influxdb.cardinality()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/cardinality): Flux function that returns the number of unique [series keys](/influxdb/v2.5/reference/glossary/#series) in your data. + +- [`SHOW SERIES CARDINALITY`](/{{< latest "influxdb" "v1" >}}/query_language/spec/#show-series-cardinality): InfluxQL command that returns the number of unique [series keys](/influxdb/v2.5/reference/glossary/#series) in your data. + +## Resolve high cardinality + +To resolve high series cardinality, complete the following steps (for multiple buckets if applicable): + +1. [Review tags](#review-tags). +2. [Improve your schema](#improve-your-schema). +3. [Delete high cardinality data](#delete-data-to-reduce-high-cardinality). + +## Review tags + +Review your tags to ensure each tag **does not contain** unique values for most entries: + +- Scan your tags for [common tag issues](#common-tag-issues). 
+- Use the example Flux query below to [count unique tag values](#count-unique-tag-values). + +### Common tag issues + +Look for the following common issues, which often cause many unique tag values: + +- **Writing log messages to tags**. If a log message includes a unique timestamp, pointer value, or unique string, many unique tag values are created. +- **Writing timestamps to tags**. Typically done by accident in client code. +- **Unique tag values that grow over time** For example, a user ID tag may work at a small startup, but may begin to cause issues when the company grows to hundreds of thousands of users. + +### Count unique tag values + +The following example Flux query shows you which tags are contributing the most to cardinality. Look for tags with values orders of magnitude higher than others. + +```js +// Count unique values for each tag in a bucket +import "influxdata/influxdb/schema" + +cardinalityByTag = (bucket) => schema.tagKeys(bucket: bucket) + |> map( + fn: (r) => ({ + tag: r._value, + _value: if contains(set: ["_stop", "_start"], value: r._value) then + 0 + else + (schema.tagValues(bucket: bucket, tag: r._value) + |> count() + |> findRecord(fn: (key) => true, idx: 0))._value, + }), + ) + |> group(columns: ["tag"]) + |> sum() + +cardinalityByTag(bucket: "example-bucket") +``` + +{{% note %}} + If you're experiencing runaway cardinality, the query above may timeout. If you experience a timeout, run the queries below—one at a time. +{{% /note %}} + +1. Generate a list of tags: + + ```js + // Generate a list of tags + import "influxdata/influxdb/schema" + + schema.tagKeys(bucket: "example-bucket") + ``` + +2. Count unique tag values for each tag: + + ```js + // Run the following for each tag to count the number of unique tag values + import "influxdata/influxdb/schema" + + tag = "example-tag-key" + + schema.tagValues(bucket: "my-bucket", tag: tag) + |> count() + ``` + +These queries should help identify the sources of high cardinality in each of your buckets. To determine which specific tags are growing, check the cardinality again after 24 hours to see if one or more tags have grown significantly. + +## Improve your schema + +To minimize cardinality in the future, design your schema for easy and performant querying. +Review [best practices for schema design](/influxdb/v2.5/write-data/best-practices/schema-design/). + +## Delete data to reduce high cardinality + +Consider whether you need the data that is causing high cardinality. +If you no longer need this data, you can [delete the whole bucket](/influxdb/v2.5/organizations/buckets/delete-bucket/) or [delete a range of data](/influxdb/v2.5/write-data/delete-data/). diff --git a/content/influxdb/v2.5/write-data/best-practices/schema-design.md b/content/influxdb/v2.5/write-data/best-practices/schema-design.md new file mode 100644 index 000000000..c43b7ac96 --- /dev/null +++ b/content/influxdb/v2.5/write-data/best-practices/schema-design.md @@ -0,0 +1,252 @@ +--- +title: InfluxDB schema design +description: > + Design your schema for simpler and more performant queries. +menu: + influxdb_2_5: + name: Schema design + weight: 201 + parent: write-best-practices +related: + - /resources/videos/data-model-building-blocks/ +--- + +Design your [schema](/influxdb/v2.5/reference/glossary/#schema) for simpler and more performant queries. +Follow design guidelines to make your schema easy to query. +Learn how these guidelines lead to more performant queries. 
+ +- [Design to query](#design-to-query) + - [Keep measurements and keys simple](#keep-measurements-and-keys-simple) +- [Use tags and fields](#use-tags-and-fields) + - [Use fields for unique and numeric data](#use-fields-for-unique-and-numeric-data) + - [Use tags to improve query performance](#use-tags-to-improve-query-performance) + - [Keep tags simple](#keep-tags-simple) + +{{% note %}} + +Good schema design can prevent high series cardinality, resulting in better performing queries. If you notice data reads and writes slowing down or want to learn how cardinality affects performance, see how to [resolve high cardinality](/influxdb/v2.5/write-data/best-practices/resolve-high-cardinality/). + +{{% /note %}} + +## Design to query + +The schemas below demonstrate [measurements](/influxdb/v2.5/reference/glossary/#measurement), [tag keys](/influxdb/v2.5/reference/glossary/#tag-key), and [field keys](/influxdb/v2.5/reference/glossary/#field-key) that are easy to query. + +| measurement | tag key | tag key | field key | field key | +|----------------------|-----------|---------|-----------|-------------| +| airSensor | sensorId | station | humidity | temperature | +| waterQualitySensor | sensorId | station | pH | temperature | + +The `airSensor` and `waterQualitySensor` schemas illustrate the following guidelines: +- Each measurement is a simple name that describes a schema. +- Keys [don't repeat within a schema](#avoid-duplicate-names-for-tags-and-fields). +- Keys [don't use reserved keywords or special characters](#avoid-keywords-and-special-characters-in-keys). +- Tags (`sensorId` and `station`) [store metadata common across many data points](#use-tags-to-improve-query-performance). +- Fields (`humidity`, `pH`, and `temperature`) [store numeric data](#use-fields-for-unique-and-numeric-data). +- Fields [store unique or highly variable](#use-fields-for-unique-and-numeric-data) data. +- Measurements and keys [don't contain data](#keep-measurements-and-keys-simple); tag values and field values will store data. + +The following points (formatted as line protocol) use the `airSensor` and `waterQualitySensor` schemas: + +``` +airSensor,sensorId=A0100,station=Harbor humidity=35.0658,temperature=21.667 1636729543000000000 +waterQualitySensor,sensorId=W0101,station=Harbor pH=6.1,temperature=16.103 1472515200000000000 +``` + +### Keep measurements and keys simple + +Store data in [tag values](/influxdb/v2.5/reference/glossary/#tag-value) or [field values](/influxdb/v2.5/reference/glossary/#field-value), not in [tag keys](/influxdb/v2.5/reference/glossary/#tag-key), [field keys](/influxdb/v2.5/reference/glossary/#field-key), or [measurements](/influxdb/v2.5/reference/glossary/#measurement). If you design your schema to store data in tag and field values, +your queries will be easier to write and more efficient. + +{{% oss-only %}} + +In addition, you'll keep cardinality low by not creating measurements and keys as you write data. +To learn more about the performance impact of high series cardinality, see how to [resolve high cardinality](/influxdb/v2.5/write-data/best-practices/resolve-high-cardinality/). + +{{% /oss-only %}} + +#### Compare schemas + +Compare the following valid schemas represented by line protocol. + +**Recommended**: the following schema stores metadata in separate `crop`, `plot`, and `region` tags. The `temp` field contains variable numeric data. 
+ +##### {id="good-measurements-schema"} +``` +Good Measurements schema - Data encoded in tags (recommended) +------------- +weather_sensor,crop=blueberries,plot=1,region=north temp=50.1 1472515200000000000 +weather_sensor,crop=blueberries,plot=2,region=midwest temp=49.8 1472515200000000000 +``` + +**Not recommended**: the following schema stores multiple attributes (`crop`, `plot` and `region`) concatenated (`blueberries.plot-1.north`) within the measurement, similar to Graphite metrics. + +##### {id="bad-measurements-schema"} +``` +Bad Measurements schema - Data encoded in the measurement (not recommended) +------------- +blueberries.plot-1.north temp=50.1 1472515200000000000 +blueberries.plot-2.midwest temp=49.8 1472515200000000000 +``` + +**Not recommended**: the following schema stores multiple attributes (`crop`, `plot` and `region`) concatenated (`blueberries.plot-1.north`) within the field key. + +##### {id="bad-keys-schema"} +``` +Bad Keys schema - Data encoded in field keys (not recommended) +------------- +weather_sensor blueberries.plot-1.north.temp=50.1 1472515200000000000 +weather_sensor blueberries.plot-2.midwest.temp=49.8 1472515200000000000 +``` + +#### Compare queries + +Compare the following queries of the [_Good Measurements_](#good-measurements-schema) and [_Bad Measurements_](#bad-measurements-schema) schemas. +The [Flux](/{{< latest "flux" >}}/) queries calculate the average `temp` for blueberries in the `north` region + +**Easy to query**: [_Good Measurements_](#good-measurements-schema) data is easily filtered by `region` tag values, as in the following example. + +```js +// Query *Good Measurements*, data stored in separate tags (recommended) +from(bucket:"example-bucket") + |> range(start:2016-08-30T00:00:00Z) + |> filter(fn: (r) => r._measurement == "weather_sensor" and r.region == "north" and r._field == "temp") + |> mean() +``` + +**Difficult to query**: [_Bad Measurements_](#bad-measurements-schema) requires regular expressions to extract `plot` and `region` from the measurement, as in the following example. + +```js +// Query *Bad Measurements*, data encoded in the measurement (not recommended) +from(bucket:"example-bucket") + |> range(start:2016-08-30T00:00:00Z) + |> filter(fn: (r) => r._measurement =~ /\.north$/ and r._field == "temp") + |> mean() +``` + +Complex measurements make some queries impossible. For example, calculating the average temperature of both plots is not possible with the [_Bad Measurements_](#bad-measurements-schema) schema. + +#### Keep keys simple + +In addition to keeping your keys free of data, follow these additional guidelines to make them easier to query: +- [Avoid keywords and special characters](#avoid-keywords-and-special-characters-in-keys) +- [Avoid duplicate names for tags and fields](#avoid-duplicate-names-for-tags-and-fields) + +##### Avoid keywords and special characters in keys + +To simplify query writing, don't include reserved keywords or special characters in tag and field keys. +If you use [Flux keywords](/{{< latest "flux" >}}/spec/lexical-elements/#keywords) in keys, +then you'll have to wrap the keys in double quotes. +If you use non-alphanumeric characters in keys, then you'll have to use [bracket notation](/{{< latest "flux" >}}/data-types/composite/record/#bracket-notation) in [Flux]((/{{< latest "flux" >}}/). 
+ +##### Avoid duplicate names for tags and fields + +Avoid using the same name for a [tag key](/influxdb/v2.5/reference/glossary/#tag-key) and a [field key](/influxdb/v2.5/reference/glossary/#field-key) within the same schema. +Your query results may be unpredictable if you have a tag and a field with the same name. + +{{% cloud-only %}} + +{{% note %}} +Use [explicit bucket schemas]() to enforce unique tag and field keys within a schema. +{{% /note %}} + +{{% /cloud-only %}} + +## Use tags and fields + +[Tag values](/influxdb/v2.5/reference/glossary/#tag-value) are indexed and [field values](/influxdb/v2.5/reference/glossary/#field-value) aren't. +This means that querying tags is more performant than querying fields. +Your queries should guide what you store in tags and what you store in fields. + +### Use fields for unique and numeric data + +- Store unique or frequently changing values as field values. +- Store numeric values as field values. ([Tags](/influxdb/v2.5/reference/glossary/#tag-value) only store strings). + +### Use tags to improve query performance + +- Store values as tag values if they can be reasonably indexed. +- Store values as [tag values](/influxdb/v2.5/reference/glossary/#tag-value) if the values are used in [filter()]({{< latest "flux" >}}/universe/filter/) or [group()](/{{< latest "flux" >}}/universe/group/) functions. +- Store values as tag values if the values are shared across multiple data points, i.e. metadata about the field. + +Because InfluxDB indexes tags, the query engine doesn't need to scan every record in a bucket to locate a tag value. +For example, consider a bucket that stores data about thousands of users. With `userId` stored in a [field](/influxdb/v2.5/reference/glossary/#field), a query for user `abcde` requires InfluxDB to scan `userId` in every row. + +```js +from(bucket: "example-bucket") + |> range(start: -7d) + |> filter(fn: (r) => r._field == "userId" and r._value == "abcde") +``` + +To retrieve data more quickly, filter on a tag to reduce the number of rows scanned. +The tag should store data that can be reasonably indexed. +The following query filters by the `company` tag to reduce the number of rows scanned for `userId`. + +```js +from(bucket: "example-bucket") + |> range(start: -7d) + |> filter(fn: (r) => r.company == "Acme") + |> filter(fn: (r) => r._field == "userId" and r._value == "abcde") +``` + +### Keep tags simple + +Use one tag for each data attribute. +If your source data contains multiple data attributes in a single parameter, +split each attribute into its own tag. +When each tag represents one attribute (not multiple concatenated attributes) of your data, +you'll reduce the need for regular expressions in your queries. +Without regular expressions, your queries will be easier to write and more performant. + +#### Compare schemas + +Compare the following valid schemas represented by line protocol. + +**Recommended**: the following schema splits location data into `plot` and `region` tags. + +##### {id="good-tags-schema"} +``` +Good Tags schema - Data encoded in multiple tags +------------- +weather_sensor,crop=blueberries,plot=1,region=north temp=50.1 1472515200000000000 +weather_sensor,crop=blueberries,plot=2,region=midwest temp=49.8 1472515200000000000 +``` + +**Not recommended**: the following schema stores multiple attributes (`plot` and `region`) concatenated within the `location` tag value (`plot-1.north`). 
+ +##### {id="bad-tags-schema"} +``` +Bad Tags schema - Multiple data encoded in a single tag +------------- +weather_sensor,crop=blueberries,location=plot-1.north temp=50.1 1472515200000000000 +weather_sensor,crop=blueberries,location=plot-2.midwest temp=49.8 1472515200000000000 +``` + +#### Compare queries + +Compare queries of the [_Good Tags_](#good-tags-schema) and [_Bad Tags_](#bad-tags-schema) schemas. +The [Flux](/{{< latest "flux" >}}/) queries calculate the average `temp` for blueberries in the `north` region. + +**Easy to query**: [_Good Tags_](#good-tags-schema) data is easily filtered by `region` tag values, as in the following example. + +```js +// Query *Good Tags* schema, data encoded in multiple tags +from(bucket:"example-bucket") + |> range(start:2016-08-30T00:00:00Z) + |> filter(fn: (r) => r._measurement == "weather_sensor" and r.region == "north" and r._field == "temp") + |> mean() +``` + +**Difficult to query**: [_Bad Tags_](#bad-tags-schema) requires regular expressions to parse the complex `location` values, as in the following example. + +```js +// Query *Bad Tags* schema, multiple data encoded in a single tag +from(bucket:"example-bucket") + |> range(start:2016-08-30T00:00:00Z) + |> filter(fn: (r) => r._measurement == "weather_sensor" and r.location =~ /\.north$/ and r._field == "temp") + |> mean() +``` + +For an overview of the InfluxDB data model, watch the following video: + +{{< youtube 3qTTqsL27lI >}} \ No newline at end of file diff --git a/content/influxdb/v2.5/write-data/delete-data.md b/content/influxdb/v2.5/write-data/delete-data.md new file mode 100644 index 000000000..b31e9ce45 --- /dev/null +++ b/content/influxdb/v2.5/write-data/delete-data.md @@ -0,0 +1,185 @@ +--- +title: Delete data +list_title: Delete data +description: > + Use the `influx` CLI or the InfluxDB API `/api/v2/delete` endpoint to delete + data from an InfluxDB bucket. +menu: + influxdb_2_5: + name: Delete data + parent: Write data +weight: 107 +influxdb/v2.5/tags: [delete] +related: + - /influxdb/v2.5/reference/syntax/delete-predicate/ + - /influxdb/v2.5/reference/cli/influx/delete/ + - /influxdb/v2.5/organizations/buckets/delete-bucket/ +--- + +Use the [`influx` CLI](/influxdb/v2.5/reference/cli/influx/) or the InfluxDB API +[`/api/v2/delete`](/influxdb/v2.5/api/#operation/PostDelete) endpoint to delete +data from an InfluxDB bucket. + +- [Delete data using the influx CLI](#delete-data-using-the-influx-cli) +- [Delete data using the API](#delete-data-using-the-api) + +InfluxDB {{< current-version >}} supports deleting data by the following: + +- time range +- measurement (`_measurement`) +- tag +- {{% cloud-only %}}field (`_field`){{% /cloud-only %}} + +{{% oss-only %}} + +{{% warn %}} +#### Cannot delete data by field +InfluxDB {{< current-version >}} does not support deleting data **by field**. +{{% /warn %}} + +{{% /oss-only %}} + +{{% cloud-only %}} + +In InfluxDB Cloud, writes and deletes are asynchronous and eventually consistent. +Once InfluxDB validates your request and queues the delete, +it sends a _success_ response (HTTP `204` status code) as an acknowledgement. +To ensure that InfluxDB handles writes and deletes in the order you request them, wait for the acknowledgement before you send the next request. +Once InfluxDB executes a queued delete, the deleted data is no longer queryable, +but will remain on disk until the compaction service runs. 
+ +{{% /cloud-only %}} + +{{% oss-only %}} + +Once a delete request completes successfully, the deleted data is no longer queryable, +but will remain on disk until the compaction service runs. + +{{% /oss-only %}} + +## Delete data using the influx CLI + +{{% note %}} +Use [InfluxDB CLI connection configurations](/influxdb/v2.5/reference/cli/influx/config/) +to provide your **InfluxDB host, organization, and API token**. +{{% /note %}} + +1. Use the [`influx delete` command](/influxdb/v2.5/reference/cli/influx/delete/) to delete points from InfluxDB. +2. Use the `--bucket` flag to specify which bucket to delete data from. +3. Use the `--start` and `--stop` flags to define the time range to delete data from. + Use [RFC3339 timestamps](/influxdb/v2.5/reference/glossary/#rfc3339-timestamp). +4. _(Optional)_ Use the `-p`, `--predicate` flag to include a [delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate) + that identifies which points to delete. + + {{% warn %}} +Deleting data without a [delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate) +deletes all data in the specified bucket with timestamps between the specified `start` and `stop` times. + {{% /warn %}} + +### Examples + +- [Delete points in a specific measurement with a specific tag value](#delete-points-in-a-specific-measurement-with-a-specific-tag-value) +- [Delete all points in a specified time range](#delete-all-points-in-a-specified-time-range) +- {{% cloud-only %}}[Delete points for a specific field in a specified time range](#delete-points-for-a-specific-field-in-a-specified-time-range){{% /cloud-only %}} + +##### Delete points in a specific measurement with a specific tag value +```sh +influx delete --bucket example-bucket \ + --start '1970-01-01T00:00:00Z' \ + --stop $(date +"%Y-%m-%dT%H:%M:%SZ") \ + --predicate '_measurement="example-measurement" AND exampleTag="exampleTagValue"' +``` + +##### Delete all points in a specified time range +```sh +influx delete --bucket example-bucket \ + --start 2020-03-01T00:00:00Z \ + --stop 2020-11-14T00:00:00Z +``` + +{{% cloud-only %}} + +##### Delete points for a specific field in a specified time range +```sh +influx delete --bucket example-bucket \ + --start 2022-01-01T00:00:00Z \ + --stop 2022-02-01T00:00:00Z \ + --predicate '_field="example-field"' +``` + +{{% /cloud-only %}} + +## Delete data using the API +Use the InfluxDB API [`/api/v2/delete` endpoint](/influxdb/v2.5/api/#operation/PostDelete) +to delete points from InfluxDB. 
+
+{{< api-endpoint method="post" endpoint="http://localhost:8086/api/v2/delete" >}}
+
+Include the following:
+
+- **Request method:** `POST`
+- **Headers:**
+  - **Authorization:** `Token` schema with your InfluxDB API token
+  - **Content-type:** `application/json`
+- **Query parameters:**
+  - **org** or **orgID:** organization name or [organization ID](/influxdb/v2.5/organizations/view-orgs/#view-your-organization-id)
+  - **bucket** or **bucketID:** bucket name or [bucket ID](/influxdb/v2.5/organizations/buckets/view-buckets/)
+- **Request body:** JSON object with the following fields:
+  {{< req type="key" >}}
+  - {{< req "\*" >}} **start:** earliest time to delete data from ([RFC3339](/influxdb/v2.5/reference/glossary/#rfc3339-timestamp))
+  - {{< req "\*" >}} **stop:** latest time to delete data from ([RFC3339](/influxdb/v2.5/reference/glossary/#rfc3339-timestamp))
+  - **predicate:** [delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate) statement
+
+  {{% warn %}}
+Deleting data without a [delete predicate](/influxdb/v2.5/reference/syntax/delete-predicate)
+deletes all data in the specified bucket with timestamps between the specified `start` and `stop` times.
+  {{% /warn %}}
+
+### Examples
+
+- [Delete points in a specific measurement with a specific tag value](#delete-points-in-a-specific-measurement-with-a-specific-tag-value-1)
+- [Delete all points in a specified time range](#delete-all-points-in-a-specified-time-range-1)
+- {{% cloud-only %}}[Delete points for a specific field in a specified time range](#delete-points-for-a-specific-field-in-a-specified-time-range-1){{% /cloud-only %}}
+
+##### Delete points in a specific measurement with a specific tag value
+```sh
+curl --request POST "http://localhost:8086/api/v2/delete?org=example-org&bucket=example-bucket" \
+  --header 'Authorization: Token YOUR_API_TOKEN' \
+  --header 'Content-Type: application/json' \
+  --data '{
+    "start": "2020-03-01T00:00:00Z",
+    "stop": "2020-11-14T00:00:00Z",
+    "predicate": "_measurement=\"example-measurement\" AND exampleTag=\"exampleTagValue\""
+  }'
+```
+
+##### Delete all points in a specified time range
+```sh
+curl --request POST "http://localhost:8086/api/v2/delete?org=example-org&bucket=example-bucket" \
+  --header 'Authorization: Token YOUR_API_TOKEN' \
+  --header 'Content-Type: application/json' \
+  --data '{
+    "start": "2020-03-01T00:00:00Z",
+    "stop": "2020-11-14T00:00:00Z"
+  }'
+```
+
+{{% cloud-only %}}
+
+##### Delete points for a specific field in a specified time range
+```sh
+curl --request POST "http://localhost:8086/api/v2/delete?org=example-org&bucket=example-bucket" \
+  --header 'Authorization: Token YOUR_API_TOKEN' \
+  --header 'Content-Type: application/json' \
+  --data '{
+    "start": "2022-01-01T00:00:00Z",
+    "stop": "2022-02-01T00:00:00Z",
+    "predicate": "_field=\"example-field\""
+  }'
+```
+
+{{% /cloud-only %}}
+
+_For more information, see the [`/api/v2/delete` endpoint documentation](/influxdb/v2.5/api/#operation/PostDelete)._
+
+To delete a bucket, see [Delete a bucket](/influxdb/v2.5/organizations/buckets/delete-bucket/).
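+
+You can also call the `/api/v2/delete` endpoint from a client library.
+The following is a minimal sketch using the [InfluxDB Python client library](https://github.com/influxdata/influxdb-client-python)
+(`influxdb-client`); the URL, token, organization, bucket, and predicate values are placeholders:
+
+```python
+from influxdb_client import InfluxDBClient
+
+# Placeholder connection details; replace with your own values.
+with InfluxDBClient(url="http://localhost:8086", token="YOUR_API_TOKEN", org="example-org") as client:
+    delete_api = client.delete_api()
+
+    # Delete points in the time range that match the delete predicate.
+    delete_api.delete(
+        start="2020-03-01T00:00:00Z",
+        stop="2020-11-14T00:00:00Z",
+        predicate='_measurement="example-measurement" AND exampleTag="exampleTagValue"',
+        bucket="example-bucket",
+        org="example-org",
+    )
+```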
diff --git a/content/influxdb/v2.5/write-data/developer-tools/_index.md b/content/influxdb/v2.5/write-data/developer-tools/_index.md new file mode 100644 index 000000000..e2f46b4b8 --- /dev/null +++ b/content/influxdb/v2.5/write-data/developer-tools/_index.md @@ -0,0 +1,16 @@ +--- +title: Write data with developer tools +seotitle: Write data to InfluxDB with developer tools +list_title: Use developer tools +weight: 102 +description: > + Use developer tools such as the InfluxDB API and `influx` CLI to write data to InfluxDB. +menu: + influxdb_2_5: + name: Developer tools + parent: Write data +--- + +Write data to InfluxDB with developer tools. + +{{< children >}} diff --git a/content/influxdb/v2.5/write-data/developer-tools/api.md b/content/influxdb/v2.5/write-data/developer-tools/api.md new file mode 100644 index 000000000..52660068e --- /dev/null +++ b/content/influxdb/v2.5/write-data/developer-tools/api.md @@ -0,0 +1,57 @@ +--- +title: Write data with the InfluxDB API +weight: 206 +description: > + Use the `/api/v2/write` InfluxDB API endpoint to write data to InfluxDB. +menu: + influxdb_2_5: + name: InfluxDB API + parent: Developer tools +--- +Write data to InfluxDB using an HTTP request to the InfluxDB API `/api/v2/write` endpoint. +Use the `POST` request method and include the following in your request: + +| Requirement | Include by | +|:----------- |:---------- | +| Organization | Use the `org` query parameter in your request URL. | +| Bucket | Use the `bucket` query parameter in your request URL. | +| Timestamp precision | Use the [`precision`](/influxdb/v2.5/write-data/#timestamp-precision) query parameter in your request URL. Default is `ns`. | +| API token | Use the `Authorization: Token YOUR_API_TOKEN` header. | +| Line protocol | Pass as plain text in your request body. | + +#### Send a write request + +The URL in the examples depends on the version and location of your InfluxDB {{< current-version >}} instance. +Customize URLs in examples + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Curl](#curl) +[Node.js](#nodejs) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sh +{{< get-shared-text "api/v2.0/write/write.sh" >}} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +{{< get-shared-text "api/v2.0/write/write.mjs" >}} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% note %}} +##### Use gzip compression with the InfluxDB API + +When using the InfluxDB API `/api/v2/write` endpoint to write data, compress the data with `gzip` and set the `Content-Encoding` +header to `gzip`. +Compression reduces network bandwidth, but increases server-side load. + +```sh +{{% get-shared-text "api/v2.0/write/write-compress.sh" %}} +``` +{{% /note %}} + +_For information about **InfluxDB API response codes**, see +[InfluxDB API Write documentation](/influxdb/v2.5/api/#operation/PostWrite)._ diff --git a/content/influxdb/v2.5/write-data/developer-tools/client-libraries.md b/content/influxdb/v2.5/write-data/developer-tools/client-libraries.md new file mode 100644 index 000000000..231c1633d --- /dev/null +++ b/content/influxdb/v2.5/write-data/developer-tools/client-libraries.md @@ -0,0 +1,15 @@ +--- +title: Write data with client libraries +weight: 204 +description: > + Use client libraries to write data to InfluxDB. +menu: + influxdb_2_5: + name: Client libraries + identifier: write-client-libraries + parent: Developer tools +--- + +Use language-specific client libraries to integrate with the InfluxDB v2 API. 
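+
+For example, the following is a minimal sketch of a write using the
+[InfluxDB Python client library](https://github.com/influxdata/influxdb-client-python) (`influxdb-client`);
+the URL, token, organization, bucket, and data values are placeholders:
+
+```python
+from influxdb_client import InfluxDBClient, Point
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+# Placeholder connection details; replace with your own values.
+with InfluxDBClient(url="http://localhost:8086", token="YOUR_API_TOKEN", org="example-org") as client:
+    write_api = client.write_api(write_options=SYNCHRONOUS)
+
+    # Build a point with a measurement, a tag, and a field.
+    point = Point("home").tag("room", "kitchen").field("temperature", 21.5)
+
+    # Write the point to the target bucket.
+    write_api.write(bucket="example-bucket", record=point)
+```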
+ +See [Client libraries reference](/influxdb/v2.5/api-guide/client-libraries/) for more information. diff --git a/content/influxdb/v2.5/write-data/developer-tools/csv.md b/content/influxdb/v2.5/write-data/developer-tools/csv.md new file mode 100644 index 000000000..80be90f7f --- /dev/null +++ b/content/influxdb/v2.5/write-data/developer-tools/csv.md @@ -0,0 +1,601 @@ +--- +title: Write CSV data to InfluxDB +description: > + Write CSV data with the [`influx write` command](/influxdb/cloud/reference/cli/influx/write/) or Flux. + Include annotations with the CSV data to determine how the data translates into + [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/). +menu: + influxdb_2_5: + name: Write CSV data + parent: Developer tools +aliases: + - /influxdb/v2.5/write-data/csv/ +weight: 204 +related: + - /influxdb/v2.5/reference/syntax/line-protocol/ + - /influxdb/v2.5/reference/syntax/annotated-csv/ + - /influxdb/v2.5/reference/cli/influx/write/ +--- + +Write CSV data with the following methods: + +- [Upload a file or manually paste data in the UI](/influxdb/cloud/write-data/no-code/load-data/#load-data-by-uploading-a-csv-or-line-protocol-file) +- [influx write command](#influx-write-command) +- [Telegraf](#telegraf) +- [Flux](#flux) + +## influx write command + +Use the [`influx write` command](/influxdb/v2.5/reference/cli/influx/write/) to write CSV data +to InfluxDB. Include [Extended annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/extended/) +annotations to specify how the data translates into [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/). +Include annotations in the CSV file or inject them using the `--header` flag of +the `influx write` command. + +##### On this page +- [CSV Annotations](#csv-annotations) +- [Inject annotation headers](#inject-annotation-headers) +- [Skip annotation headers](#skip-annotation-headers) +- [Process input as CSV](#process-input-as-csv) +- [Specify CSV character encoding](#specify-csv-character-encoding) +- [Skip rows with errors](#skip-rows-with-errors) +- [Advanced examples](#advanced-examples) + +##### Example write command +```sh +influx write -b example-bucket -f path/to/example.csv +``` + +##### example.csv +``` +#datatype measurement,tag,double,dateTime:RFC3339 +m,host,used_percent,time +mem,host1,64.23,2020-01-01T00:00:00Z +mem,host2,72.01,2020-01-01T00:00:00Z +mem,host1,62.61,2020-01-01T00:00:10Z +mem,host2,72.98,2020-01-01T00:00:10Z +mem,host1,63.40,2020-01-01T00:00:20Z +mem,host2,73.77,2020-01-01T00:00:20Z +``` + +##### Resulting line protocol +``` +mem,host=host1 used_percent=64.23 1577836800000000000 +mem,host=host2 used_percent=72.01 1577836800000000000 +mem,host=host1 used_percent=62.61 1577836810000000000 +mem,host=host2 used_percent=72.98 1577836810000000000 +mem,host=host1 used_percent=63.40 1577836820000000000 +mem,host=host2 used_percent=73.77 1577836820000000000 +``` + +{{% note %}} +To test the CSV to line protocol conversion process, use the `influx write dryrun` +command to print the resulting line protocol to stdout rather than write to InfluxDB. +{{% /note %}} + +{{% note %}} +##### "too many open files" errors + +When attempting to write large amounts of CSV data into InfluxDB, you might see an error like the following: + +``` +Error: Failed to write data: unexpected error writing points to database: [shard <#>] fcntl: too many open files. 
+``` + +To fix this error on Linux or macOS, run the following command to increase the number of open files allowed: + + ``` + ulimit -n 10000 + ``` + +macOS users, to persist the `ulimit` setting, follow the [recommended steps](https://unix.stackexchange.com/a/221988/471569) for your operating system version. + +{{% /note %}} + +## Telegraf + +Use CSV data format in Telegraf as a way to write CSV data to InfluxDB. + +For more information, see: + +- [CSV input data format](/telegraf/v1.19/data_formats/input/csv/) +- [Use Telegraf to write data](/influxdb/v2.5/write-data/no-code/use-telegraf/) + +## CSV Annotations +Use **CSV annotations** to specify which element of line protocol each CSV column +represents and how to format the data. CSV annotations are rows at the beginning +of a CSV file that describe column properties. + +The `influx write` command supports [Extended annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/extended) +which provides options for specifying how CSV data should be converted into line +protocol and how data is formatted. + +To write data to InfluxDB, data must include the following: + +- [measurement](/influxdb/v2.5/reference/syntax/line-protocol/#measurement) +- [field set](/influxdb/v2.5/reference/syntax/line-protocol/#field-set) +- [timestamp](/influxdb/v2.5/reference/syntax/line-protocol/#timestamp) _(Optional but recommended)_ +- [tag set](/influxdb/v2.5/reference/syntax/line-protocol/#tag-set) _(Optional)_ + +Use CSV annotations to specify which of these elements each column represents. + +## Write raw query results back to InfluxDB +Flux returns query results in [annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv/). +These results include all annotations necessary to write the data back to InfluxDB. + +## Inject annotation headers +If the CSV data you want to write to InfluxDB does not contain the annotations +required to properly convert the data to line protocol, use the `--header` flag +to inject annotation rows into the CSV data. + +```sh +influx write -b example-bucket \ + -f path/to/example.csv \ + --header "#constant measurement,birds" \ + --header "#datatype dateTime:2006-01-02,long,tag" +``` + +{{< flex >}} +{{% flex-content %}} +##### example.csv +``` +date,sighted,loc +2020-01-01,12,Boise +2020-06-01,78,Boise +2020-01-01,54,Seattle +2020-06-01,112,Seattle +2020-01-01,9,Detroit +2020-06-01,135,Detroit +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +birds,loc=Boise sighted=12i 1577836800000000000 +birds,loc=Boise sighted=78i 1590969600000000000 +birds,loc=Seattle sighted=54i 1577836800000000000 +birds,loc=Seattle sighted=112i 1590969600000000000 +birds,loc=Detroit sighted=9i 1577836800000000000 +birds,loc=Detroit sighted=135i 1590969600000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +#### Use files to inject headers +The `influx write` command supports importing multiple files in a single command. +Include annotations and header rows in their own file and import them with the write command. +Files are read in the order in which they're provided. 
+
+```sh
+influx write -b example-bucket \
+  -f path/to/headers.csv \
+  -f path/to/example.csv
+```
+
+{{< flex >}}
+{{% flex-content %}}
+##### headers.csv
+```
+#constant measurement,birds
+#datatype dateTime:2006-01-02,long,tag
+```
+{{% /flex-content %}}
+{{% flex-content %}}
+##### example.csv
+```
+date,sighted,loc
+2020-01-01,12,Boise
+2020-06-01,78,Boise
+2020-01-01,54,Seattle
+2020-06-01,112,Seattle
+2020-01-01,9,Detroit
+2020-06-01,135,Detroit
+```
+{{% /flex-content %}}
+{{< /flex >}}
+
+##### Resulting line protocol
+```
+birds,loc=Boise sighted=12i 1577836800000000000
+birds,loc=Boise sighted=78i 1590969600000000000
+birds,loc=Seattle sighted=54i 1577836800000000000
+birds,loc=Seattle sighted=112i 1590969600000000000
+birds,loc=Detroit sighted=9i 1577836800000000000
+birds,loc=Detroit sighted=135i 1590969600000000000
+```
+
+## Skip annotation headers
+Some CSV data may include header rows that conflict with or lack the annotations
+necessary to write CSV data to InfluxDB.
+Use the `--skipHeader` flag to specify the **number of rows to skip** at the
+beginning of the CSV data.
+
+```sh
+influx write -b example-bucket \
+  -f path/to/example.csv \
+  --skipHeader=2
+```
+
+You can then [inject new header rows](#inject-annotation-headers) to rename columns
+and provide the necessary annotations.
+
+## Process input as CSV
+The `influx write` command automatically processes files with the `.csv` extension as CSV files.
+If your CSV file uses a different extension, use the `--format` flag to explicitly
+declare the format of the input file.
+
+```sh
+influx write -b example-bucket \
+  -f path/to/example.txt \
+  --format csv
+```
+
+{{% note %}}
+The `influx write` command assumes all input files are line protocol unless they
+include the `.csv` extension or you explicitly declare the `csv` format.
+{{% /note %}}
+
+## Specify CSV character encoding
+The `influx write` command assumes CSV files contain UTF-8 encoded characters.
+If your CSV data uses different character encoding, specify the encoding
+with the `--encoding` flag.
+
+```sh
+influx write -b example-bucket \
+  -f path/to/example.csv \
+  --encoding "UTF-16"
+```
+
+## Skip rows with errors
+If a row in your CSV data is missing an
+[element required to write to InfluxDB](/influxdb/v2.5/reference/syntax/line-protocol/#elements-of-line-protocol)
+or contains incorrectly formatted data, the `influx write` command returns an error
+when it processes that row and cancels the write request.
+To skip rows with errors, use the `--skipRowOnError` flag.
+
+```sh
+influx write -b example-bucket \
+  -f path/to/example.csv \
+  --skipRowOnError
+```
+
+{{% warn %}}
+Skipped rows are ignored and are not written to InfluxDB.
+{{% /warn %}}
+
+Use the `--errors-file` flag to record errors to a file.
+The error file identifies all rows that cannot be imported and includes error messages for debugging.
+For example:
+
+```
+error : line 3: column 'a': '1.1' cannot fit into long data type
+cpu,1.1
+```
+
+## Advanced examples
+
+- [Define constants](#define-constants)
+- [Annotation shorthand](#annotation-shorthand)
+- [Ignore columns](#ignore-columns)
+- [Use alternate numeric formats](#use-alternate-numeric-formats)
+- [Use alternate boolean format](#use-alternate-boolean-format)
+- [Use different timestamp formats](#use-different-timestamp-formats)
+
+---
+
+### Define constants
+Use the Extended annotated CSV [`#constant` annotation](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#constant)
+to add a column and value to each row in the CSV data.
+ +{{< flex >}} +{{% flex-content %}} +##### CSV with constants +``` +#constant measurement,example +#constant tag,source,csv +#datatype long,dateTime:RFC3339 +count,time +1,2020-01-01T00:00:00Z +4,2020-01-02T00:00:00Z +9,2020-01-03T00:00:00Z +18,2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example,source=csv count=1 1577836800000000000 +example,source=csv count=4 1577923200000000000 +example,source=csv count=9 1578009600000000000 +example,source=csv count=18 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +--- + +### Annotation shorthand +Extended annotated CSV supports [annotation shorthand](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#annotation-shorthand), +which lets you define the **column label**, **datatype**, and **default value** in the column header. + +{{< flex >}} +{{% flex-content %}} +##### CSV with annotation shorthand +``` +m|measurement,count|long|0,time|dateTime:RFC3339 +example,1,2020-01-01T00:00:00Z +example,4,2020-01-02T00:00:00Z +example,,2020-01-03T00:00:00Z +example,18,2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example count=1 1577836800000000000 +example count=4 1577923200000000000 +example count=0 1578009600000000000 +example count=18 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +#### Replace column header with annotation shorthand +It's possible to replace the column header row in a CSV file with annotation +shorthand without modifying the CSV file. +This lets you define column data types and default values while writing to InfluxDB. + +To replace an existing column header row with annotation shorthand: + +1. Use the `--skipHeader` flag to ignore the existing column header row. +2. Use the `--header` flag to inject a new column header row that uses annotation shorthand. + +```sh +influx write -b example-bucket \ + -f example.csv \ + --skipHeader=1 + --header="m|measurement,count|long|0,time|dateTime:RFC3339" +``` + +{{< flex >}} +{{% flex-content %}} +##### Unmodified example.csv +``` +m,count,time +example,1,2020-01-01T00:00:00Z +example,4,2020-01-02T00:00:00Z +example,,2020-01-03T00:00:00Z +example,18,2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example count=1i 1577836800000000000 +example count=4i 1577923200000000000 +example count=0i 1578009600000000000 +example count=18i 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +--- + +### Ignore columns +Use the Extended annotated CSV [`#datatype ignored` annotation](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#ignored) +to ignore columns when writing CSV data to InfluxDB. 
+ +{{< flex >}} +{{% flex-content %}} +##### CSV data with ignored column +``` +#datatype measurement,long,time,ignored +m,count,time,foo +example,1,2020-01-01T00:00:00Z,bar +example,4,2020-01-02T00:00:00Z,bar +example,9,2020-01-03T00:00:00Z,baz +example,18,2020-01-04T00:00:00Z,baz +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +m count=1i 1577836800000000000 +m count=4i 1577923200000000000 +m count=9i 1578009600000000000 +m count=18i 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +--- + +### Use alternate numeric formats +If your CSV data contains numeric values that use a non-default fraction separator (`.`) +or contain group separators, [define your numeric format](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#double) +in the `double`, `long`, and `unsignedLong` datatype annotations. + +{{% note %}} +If your **numeric format separators** include a comma (`,`), wrap the column annotation in double +quotes (`""`) to prevent the comma from being parsed as a column separator or delimiter. +You can also [define a custom column separator](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#define-custom-column-separator). +{{% /note %}} + +{{< tabs-wrapper >}} +{{% tabs %}} +[Floats](#) +[Integers](#) +[Uintegers](#) +{{% /tabs %}} +{{% tab-content %}} +{{< flex >}} +{{% flex-content %}} +##### CSV with non-default float values +``` +#datatype measurement,"double:.,",dateTime:RFC3339 +m,lbs,time +example,"1,280.7",2020-01-01T00:00:00Z +example,"1,352.5",2020-01-02T00:00:00Z +example,"1,862.8",2020-01-03T00:00:00Z +example,"2,014.9",2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example lbs=1280.7 1577836800000000000 +example lbs=1352.5 1577923200000000000 +example lbs=1862.8 1578009600000000000 +example lbs=2014.9 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} +{{% /tab-content %}} + +{{% tab-content %}} +{{< flex >}} +{{% flex-content %}} +##### CSV with non-default integer values +``` +#datatype measurement,"long:.,",dateTime:RFC3339 +m,lbs,time +example,"1,280.0",2020-01-01T00:00:00Z +example,"1,352.0",2020-01-02T00:00:00Z +example,"1,862.0",2020-01-03T00:00:00Z +example,"2,014.9",2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example lbs=1280i 1577836800000000000 +example lbs=1352i 1577923200000000000 +example lbs=1862i 1578009600000000000 +example lbs=2014i 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} +{{% /tab-content %}} + +{{% tab-content %}} +{{< flex >}} +{{% flex-content %}} +##### CSV with non-default uinteger values +``` +#datatype measurement,"unsignedLong:.,",dateTime:RFC3339 +m,lbs,time +example,"1,280.0",2020-01-01T00:00:00Z +example,"1,352.0",2020-01-02T00:00:00Z +example,"1,862.0",2020-01-03T00:00:00Z +example,"2,014.9",2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example lbs=1280u 1577836800000000000 +example lbs=1352u 1577923200000000000 +example lbs=1862u 1578009600000000000 +example lbs=2014u 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +--- + +### Use alternate boolean format +Line protocol supports only [specific boolean values](/influxdb/v2.5/reference/syntax/line-protocol/#boolean). 
+If your CSV data contains boolean values that line protocol does not support, +[define your boolean format](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#boolean) +in the `boolean` datatype annotation. + +{{< flex >}} +{{% flex-content %}} +##### CSV with non-default boolean values +``` +#datatype measurement,"boolean:y,Y,1:n,N,0",dateTime:RFC3339 +m,verified,time +example,y,2020-01-01T00:00:00Z +example,n,2020-01-02T00:00:00Z +example,1,2020-01-03T00:00:00Z +example,N,2020-01-04T00:00:00Z +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example verified=true 1577836800000000000 +example verified=false 1577923200000000000 +example verified=true 1578009600000000000 +example verified=false 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +--- + +### Use different timestamp formats +The `influx write` command automatically detects **RFC3339** and **number** formatted +timestamps when converting CSV to line protocol. +If using a different timestamp format, [define your timestamp format](/influxdb/v2.5/reference/syntax/annotated-csv/extended/#datetime) +in the `dateTime` datatype annotation. + +{{< flex >}} +{{% flex-content %}} +##### CSV with non-default timestamps +``` +#datatype measurement,dateTime:2006-01-02,field +m,time,lbs +example,2020-01-01,1280.7 +example,2020-01-02,1352.5 +example,2020-01-03,1862.8 +example,2020-01-04,2014.9 +``` +{{% /flex-content %}} +{{% flex-content %}} +##### Resulting line protocol +``` +example lbs=1280.7 1577836800000000000 +example lbs=1352.5 1577923200000000000 +example lbs=1862.8 1578009600000000000 +example lbs=2014.9 1578096000000000000 +``` +{{% /flex-content %}} +{{< /flex >}} + +## Flux + +Use the [csv.from()](/{{< latest "flux" >}}/stdlib/csv/from/) and [to()](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/to/) Flux functions to write an annotated CSV to the bucket of your choice. + +{{< youtube wPKZ9i0DulQ >}} + +The experimental [csv.from()](/{{< latest "flux" >}}/stdlib/csv/from/) function lets you write CSV from a URL. +The example below writes [NOAA water sample data](/influxdb/v2.5/reference/sample-data/#noaa-water-sample-data) to an example `noaa` bucket in an example organization: + +```js +import "experimental/csv" + +csv.from(url: "https://influx-testdata.s3.amazonaws.com/noaa.csv") + |> to(bucket: "noaa", org: "example-org") +``` + +{{% note %}} +#### Required annotations and columns +To write CSV data to InfluxDB with Flux, you must include _all_ of the following annotations and columns: + +- `datatype` +- `group` +- `default` + +See [annotations](/influxdb/v2.5/reference/syntax/annotated-csv/#annotations) for more information. +With Flux, you must also include a comma between the annotation name and the annotation values (this differs from the `influx write` command). +See an example of valid syntax for [annotated CSV in Flux](/influxdb/v2.5/reference/syntax/annotated-csv/#annotated-csv-in-flux). + +Required columns: + +- `_time` +- `_measurement` +- `_field` +- `_value` +{{% /note %}} diff --git a/content/influxdb/v2.5/write-data/developer-tools/influx-cli.md b/content/influxdb/v2.5/write-data/developer-tools/influx-cli.md new file mode 100644 index 000000000..4ac886913 --- /dev/null +++ b/content/influxdb/v2.5/write-data/developer-tools/influx-cli.md @@ -0,0 +1,56 @@ +--- +title: Write data with the influx CLI +weight: 205 +description: > + Use the `influx write` command to write data to InfluxDB from the command line. 
+menu:
+  influxdb_2_5:
+    name: Influx CLI
+    parent: Developer tools
+related:
+  - /influxdb/v2.5/write-data/developer-tools/csv/
+---
+
+To write data from the command line, use the [`influx write` command](/influxdb/v2.5/reference/cli/influx/write/).
+Include the following in your command:
+
+| Requirement | Include by |
+|:----------- |:---------- |
+| Organization | Use the `-o`, `--org`, or `--org-id` flags. |
+| Bucket | Use the `-b`, `--bucket`, or `--bucket-id` flags. |
+| Precision | Use the `-p`, `--precision` flag. |
+| API token | Set the `INFLUX_TOKEN` environment variable or use the `-t`, `--token` flag. |
+| Data | Write data using **line protocol** or **annotated CSV**. Pass a file with the `-f`, `--file` flag. |
+
+_See [Line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) and [Annotated CSV](/influxdb/v2.5/reference/syntax/annotated-csv)._
+
+#### Example influx write commands
+
+##### Write a single line of line protocol
+```sh
+influx write \
+  -b bucketName \
+  -o orgName \
+  -p s \
+  'myMeasurement,host=myHost testField="testData" 1556896326'
+```
+
+##### Write line protocol from a file
+```sh
+influx write \
+  -b bucketName \
+  -o orgName \
+  -p s \
+  --format=lp \
+  -f /path/to/line-protocol.txt
+```
+
+##### Write annotated CSV from a file
+```sh
+influx write \
+  -b bucketName \
+  -o orgName \
+  -p s \
+  --format=csv \
+  -f /path/to/data.csv
+```
diff --git a/content/influxdb/v2.5/write-data/developer-tools/scrape-prometheus-metrics.md b/content/influxdb/v2.5/write-data/developer-tools/scrape-prometheus-metrics.md
new file mode 100644
index 000000000..d239566b5
--- /dev/null
+++ b/content/influxdb/v2.5/write-data/developer-tools/scrape-prometheus-metrics.md
@@ -0,0 +1,105 @@
+---
+title: Scrape Prometheus metrics
+seotitle: Scrape Prometheus metrics into InfluxDB
+weight: 205
+description: >
+  Use Telegraf, InfluxDB scrapers, or the `prometheus.scrape` Flux function to
+  scrape Prometheus-formatted metrics from an HTTP-accessible endpoint and store
+  them in InfluxDB.
+menu:
+  influxdb_2_5:
+    name: Scrape Prometheus metrics
+    parent: Developer tools
+related:
+  - /{{< latest "telegraf" >}}/plugins/#input-prometheus, Telegraf Prometheus input plugin
+  - /{{< latest "flux" >}}/prometheus/scrape-prometheus/, Scrape Prometheus metrics with Flux
+  - /{{< latest "flux" >}}/stdlib/experimental/prometheus/scrape/
+  - /{{< latest "flux" >}}/prometheus/metric-types/
+  - /influxdb/v2.5/reference/prometheus-metrics/
+  - /influxdb/v2.5/write-data/no-code/scrape-data/
+influxdb/v2.5/tags: [prometheus, scraper]
+---
+
+Use [Telegraf](/{{< latest "telegraf" >}}/){{% oss-only %}}, [InfluxDB scrapers](/influxdb/v2.5/write-data/no-code/scrape-data/),{{% /oss-only %}}
+or the [`prometheus.scrape` Flux function](/{{< latest "flux" >}}/stdlib/experimental/prometheus/scrape/)
+to scrape Prometheus-formatted metrics from an HTTP-accessible endpoint and store them in InfluxDB.
+
+{{% oss-only %}}
+
+- [Use Telegraf](#use-telegraf)
+- [Use an InfluxDB scraper](#use-an-influxdb-scraper)
+- [Use prometheus.scrape()](#use-prometheusscrape)
+
+{{% /oss-only %}}
+{{% cloud-only %}}
+
+- [Use Telegraf](#use-telegraf)
+- [Use prometheus.scrape()](#use-prometheusscrape)
+
+{{% /cloud-only %}}
+
+## Use Telegraf
+To use Telegraf to scrape Prometheus-formatted metrics from an HTTP-accessible
+endpoint and write them to InfluxDB{{% cloud-only %}} Cloud{{% /cloud-only %}}, follow these steps:
+
+1. Add the [Prometheus input plugin](/{{< latest "telegraf" >}}/plugins/#input-prometheus) to your Telegraf configuration file.
+    1. Set the `urls` to scrape metrics from.
+    2. Set the `metric_version` configuration option to specify which
+       [metric parsing version](/influxdb/v2.5/reference/prometheus-metrics/) to use
+       _(version `2` is recommended)_.
+2. Add the [InfluxDB v2 output plugin](/{{< latest "telegraf" >}}/plugins/#output-influxdb_v2)
+   to your Telegraf configuration file and configure it to write to
+   InfluxDB{{% cloud-only %}} Cloud{{% /cloud-only %}}.
+
+##### Example telegraf.conf
+```toml
+# ...
+
+## Collect Prometheus formatted metrics
+[[inputs.prometheus]]
+  urls = ["http://example.com/metrics"]
+  metric_version = 2
+
+## Write Prometheus formatted metrics to InfluxDB
+[[outputs.influxdb_v2]]
+  urls = ["http://localhost:8086"]
+  token = "$INFLUX_TOKEN"
+  organization = "example-org"
+  bucket = "example-bucket"
+
+# ...
+```
+
+{{% oss-only %}}
+
+## Use an InfluxDB scraper
+InfluxDB scrapers automatically scrape Prometheus-formatted metrics from an
+HTTP-accessible endpoint at a regular interval.
+For information about setting up an InfluxDB scraper, see
+[Scrape data using InfluxDB scrapers](/influxdb/v2.5/write-data/no-code/scrape-data/).
+
+{{% /oss-only %}}
+
+## Use prometheus.scrape()
+To use the [`prometheus.scrape()` Flux function](/{{< latest "flux" >}}/stdlib/experimental/prometheus/scrape/)
+to scrape Prometheus-formatted metrics from an HTTP-accessible endpoint and write
+them to InfluxDB{{% cloud-only %}} Cloud{{% /cloud-only %}}, do the following in your Flux script:
+
+1. Import the [`experimental/prometheus` package](/{{< latest "flux" >}}/stdlib/experimental/prometheus/).
+2. Use `prometheus.scrape()` and provide the URL to scrape metrics from.
+3. Use [`to()`](/{{< latest "flux" >}}/stdlib/influxdata/influxdb/to/) and specify the InfluxDB{{% cloud-only %}} Cloud{{% /cloud-only %}} bucket to write
+   the scraped metrics to.
+
+##### Example Flux script
+```js
+import "experimental/prometheus"
+
+prometheus.scrape(url: "http://example.com/metrics")
+    |> to(bucket: "example-bucket")
+```
+
+4. (Optional) To scrape Prometheus metrics at regular intervals using Flux, add your Flux
+scraping script as an [InfluxDB task](/{{< latest "influxdb" >}}/process-data/).
+
+_For information about scraping Prometheus-formatted metrics with `prometheus.scrape()`,
+see [Scrape Prometheus metrics with Flux](/{{< latest "flux" >}}/prometheus/scrape-prometheus/)._
diff --git a/content/influxdb/v2.5/write-data/developer-tools/third-party-solutions.md b/content/influxdb/v2.5/write-data/developer-tools/third-party-solutions.md
new file mode 100644
index 000000000..5998102c9
--- /dev/null
+++ b/content/influxdb/v2.5/write-data/developer-tools/third-party-solutions.md
@@ -0,0 +1,61 @@
+---
+title: Write data with third-party technologies
+weight: 103
+description: >
+  Write data to InfluxDB using third-party developer tools.
+aliases:
+menu:
+  influxdb_2_5:
+    name: Third-party developer tools
+    parent: Developer tools
+---
+
+Configure third-party technologies to send line protocol directly to InfluxDB.
+
+## AWS Lambda via CloudFormation template
+
+Write to InfluxDB with AWS Lambda, Amazon Web Services' serverless offering. This example provides a CloudFormation template that collects earthquake data from the [United States Geological Survey (USGS)](https://www.usgs.gov/) every hour and outputs it as line protocol into an InfluxDB bucket.
+ +The [template](https://influxdata-lambda.s3.amazonaws.com/GeoLambda.yml) contains the following sections: + +- Lines 1-20: Define variables that the template asks for when it's installed. +- Lines 21-120: Handle a quirk of Lambda deployments that requires the Lambda assets to be in your region before deployment. As there is no elegant workaround, these 100 lines create an S3 bucket in your account in the region you're creating the stack and copies in these resources. +- Lines 121-132: Define a role with basic permission to run the Lambda. +- Lines 133-144: Define a Python library layer. This layer packages the Python HTTP library, a Python S2 Geometry library, and the InfluxDB Python client library. +- Lines 145-165: Define the Lambda function, a short Python script zipped up in a file called `geo_lambda.zip`. +- Lines 166-188: Define an event rule with permission to run the Lambda every hour. + +### Deploy the template + +1. Log into your free AWS account and search for the CloudFormation service. Make sure you’re in the AWS region you want to deploy the Lambda to⁠. +2. Click **Create Stack**. +3. In the **Prerequisite - Prepare Template** section, select **Template is ready**. +4. In the **Specify template** section: + - Under **Template source**, select **Amazon S3 URL**. + - In the **Amazon S3 URL** field, enter the CloudFormation template URL: `https://influxdata-lambda.s3.amazonaws.com/GeoLambda.yml` +5. Click **Next**. +6. Enter a name in the **Stack name** field. +7. Enter the following InfluxDB details: + - Organization ID + - Bucket ID of the bucket the Lambda writes to + - Token with permission to write to the bucket + - InfluxDB URL +8. Do not alter or add to any other fields. Click **Next**. +9. Select the **I acknowledge that AWS CloudFormation might create IAM resources** check box. +10. Click **Create Stack**. + +After a few minutes, the stack deploys to your region. To view the new Lambda, select **Services > AWS Lambda**. On the Lambda functions page, you should see your new Lambda. The `CopyZipsFunction` is the helper copy function, and the `GeoPythonLambda` does the data collection and writing work: + +{{< img-hd src="/img/cloudformation1.png" alt="GeoPythonLambda data in InfluxDB" />}} + +### Verify your setup + +`GeoPythonLambda` should run every hour based on the AWS Rule we set up, but you should test and confirm it works. + +1. Click `GeoPythonLambda`, and then click **Test**. +2. The test requires an input definition, but this Lambda has no input requirements, so click through and save the default dataset. +3. If the test is successful, a green **Execution result: succeeded** message appears. + +With the data points written, when you log into your InfluxDB UI, you’ll be able to explore the geolocation earthquake data: + +{{< img-hd src="/img/cloudformation2.png" alt="GeoPythonLambda data in InfluxDB" />}} diff --git a/content/influxdb/v2.5/write-data/no-code/_index.md b/content/influxdb/v2.5/write-data/no-code/_index.md new file mode 100644 index 000000000..7a14933b6 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/_index.md @@ -0,0 +1,17 @@ +--- +title: Write data to InfluxDB without coding +weight: 101 +description: > + Use existing tools to write data to InfluxDB without writing code. +aliases: + - /influxdb/v2.5/collect-data/advanced-telegraf + - /influxdb/v2.5/collect-data/use-telegraf +menu: + influxdb_2_5: + name: No-code solutions + parent: Write data +--- + +The following options let you write data to InfluxDB without writing any code. 
Some options require a minimal amount of configuration.
+
+{{< children >}}
diff --git a/content/influxdb/v2.5/write-data/no-code/load-data.md b/content/influxdb/v2.5/write-data/no-code/load-data.md
new file mode 100644
index 000000000..6918d6ac2
--- /dev/null
+++ b/content/influxdb/v2.5/write-data/no-code/load-data.md
@@ -0,0 +1,94 @@
+---
+title: Load data from sources in the InfluxDB user interface (UI)
+seotitle: Load data source in UI
+list_title: Load data source in UI
+weight: 101
+description: >
+  Load data from sources in the InfluxDB user interface (UI). Choose from popular client libraries (such as Python, Ruby, Scala, and more!) or load data with a Telegraf plugin (like MQTT Consumer, MySQL, File, and many more!).
+menu:
+  influxdb_2_5:
+    name: Load data source in UI
+    parent: Write data
+---
+
+Load data from the following sources in the InfluxDB user interface (UI):
+
+- [CSV or line protocol](#load-csv-or-line-protocol-in-ui) (file upload or manual entry)
+- [Client libraries](#load-data-from-a-client-library-in-the-ui)
+- [Telegraf plugins](#load-data-from-a-telegraf-plugin-in-the-ui)
+- {{% cloud-only %}}[Native MQTT subscriptions](#set-up-an-mqtt-native-subscription){{% /cloud-only %}}
+
+### Load CSV or line protocol in UI
+
+Load CSV or line protocol data by uploading a file or pasting the data manually into the UI.
+
+1. In the navigation menu on the left, click **Load Data** > **Sources**.
+
+    {{< nav-icon "data" >}}
+
+2. Under **File Upload**, select the type of data to upload:
+   - **Annotated CSV**. Verify your CSV file follows the supported [annotated CSV](/influxdb/cloud/reference/syntax/annotated-csv/) syntax.
+   - **Line Protocol**. Verify your line protocol file adheres to the following conventions:
+     - Each line represents a data point.
+     - Each data point requires a:
+       - [*measurement*](/influxdb/cloud/reference/syntax/line-protocol/#measurement)
+       - [*field set*](/influxdb/cloud/reference/syntax/line-protocol/#field-set)
+       - (Optional) [*tag set*](/influxdb/cloud/reference/syntax/line-protocol/#tag-set)
+       - [*timestamp*](/influxdb/cloud/reference/syntax/line-protocol/#timestamp)
+
+     For more information, see supported [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) syntax.
+
+3. Select the bucket to write to.
+4. Select the **Precision** in the dropdown menu. By default, the precision is set to nanoseconds.
+5. Do one of the following:
+   - To upload a file, drag and drop your file onto the UI, and then click **Write Data**.
+   - To enter data manually, select the **Enter Manually** tab, paste your data, and then click **Write Data**.
+
+### Load data from a client library in the UI
+
+1. In the navigation menu on the left, click **Load Data** > **Sources**.
+
+    {{< nav-icon "data" >}}
+
+2. Do one of the following:
+   - Enter a specific client library to search for in the **Search data writing methods** field.
+   - Scroll down to browse available client libraries.
+3. Click the client library to load data from.
+4. Under **Code Sample Options**, you'll see a list of your InfluxDB [tokens](/influxdb/v2.5/reference/glossary/#token) and [buckets](/influxdb/v2.5/reference/glossary/#bucket). Select both an API token and a bucket to write your data to. The selected API token and bucket are automatically added to scripts on the page that you can use to initialize a client and write data.
+5. 
Click the **Copy to Clipboard** buttons under a script to easily paste the script into your terminal or save the script to reuse for automation. +6. Run the scripts on the page to do the following as needed: + - Install the package, libraries, or client + - Write data + - Execute a Flux query + +### Load data from a Telegraf plugin in the UI + +1. In the navigation menu on the left, click **Load Data** > **Sources**. + + {{< nav-icon "data" >}} + +2. Do one of the following: + - Enter a specific Telegraf plugin to search for in the **Search data writing methods** field. + - Scroll down to **Telegraf Plugins** and browse available input plugins. +3. Click the plugin to load data from. The plugin details page opens. +4. Select one of the options from the **Use this plugin** dropdown: + - **Create a new configuration**: Enter a configuration name and select an output bucket, and then click **Continue Configuring**. + - **Add to an existing configuration**: Select an existing Telegraf configuration to add this plugin to, and then click **Add to Existing Config**. +5. Provide a **Telegraf Configuration Name** and an optional **Telegraf Configuration Description**. +6. Adjust configuration settings as needed. To find configuration settings for a specific plugin, see [Telegraf plugins](/telegraf/latest/plugins/). +7. Click **Save and Test**. +8. The **Test Your Configuration** page provides instructions for how to start + Telegraf using the generated configuration. + _See [Start Telegraf](/influxdb/cloud/write-data/no-code/use-telegraf/auto-config/#start-telegraf) below for detailed information about what each step does._ +9. Once Telegraf is running, click **Listen for Data** to confirm Telegraf is successfully sending data to InfluxDB. + Once confirmed, a **Connection Found!** message appears. +10. Click **Finish**. Your Telegraf configuration name and the associated bucket name appear in the list of Telegraf configurations. + +{{% cloud-only %}} + +### Set up an MQTT native subscription + +For more details about setting up native subscriptions, see [Set up native subscriptions](/influxdb/cloud/write-data/no-code/native-subscriptions). + +{{% /cloud-only %}} diff --git a/content/influxdb/v2.5/write-data/no-code/scrape-data/_index.md b/content/influxdb/v2.5/write-data/no-code/scrape-data/_index.md new file mode 100644 index 000000000..05057c34f --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/scrape-data/_index.md @@ -0,0 +1,31 @@ +--- +title: Scrape data using InfluxDB scrapers +weight: 103 +description: > + Scrape data from InfluxDB instances or remote endpoints using InfluxDB scrapers. + Create a scraper in the InfluxDB UI to collect metrics from a specified target. +aliases: + - /influxdb/v2.5/collect-data/scraper-metrics-endpoint + - /influxdb/v2.5/collect-data/scrape-data + - /influxdb/v2.5/write-data/scrape-data + - /influxdb/v2.5/write-data/scrapable-endpoints +influxdb/v2.5/tags: [scraper] +menu: + influxdb_2_5: + name: Scrape data + parent: No-code solutions +--- + +InfluxDB scrapers collect data from specified targets at regular intervals, +then write the scraped data to an InfluxDB bucket. +Scrapers can collect data from any HTTP(S)-accessible endpoint that provides data +in the [Prometheus data format](https://prometheus.io/docs/instrumenting/exposition_formats/). + +{{% cloud %}} +Scrapers are not available in {{< cloud-name "short" >}}. 
+{{% /cloud %}} + + +The following articles provide information about creating and managing InfluxDB data scrapers: + +{{< children >}} diff --git a/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/_index.md b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/_index.md new file mode 100644 index 000000000..bb13bfd51 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/_index.md @@ -0,0 +1,20 @@ +--- +title: Manage InfluxDB scrapers +description: Create, update, and delete InfluxDB data scrapers in the InfluxDB user interface. +aliases: + - /influxdb/v2.5/collect-data/scrape-data/manage-scrapers + - /influxdb/v2.5/write-data/scrape-data/manage-scrapers +menu: + influxdb_2_5: + name: Manage scrapers + parent: Scrape data +weight: 201 +influxdb/v2.5/tags: [scraper, prometheus] +related: + - /influxdb/v2.5/write-data/no-code/scrape-data/ + +--- + +The following articles walk through managing InfluxDB scrapers: + +{{< children >}} diff --git a/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/create-a-scraper.md b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/create-a-scraper.md new file mode 100644 index 000000000..9e36ede39 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/create-a-scraper.md @@ -0,0 +1,38 @@ +--- +title: Create an InfluxDB scraper +list_title: Create a scraper +description: Create an InfluxDB scraper that collects data from InfluxDB or a remote endpoint. +aliases: + - /influxdb/v2.5/collect-data/scrape-data/manage-scrapers/create-a-scraper + - /influxdb/v2.5/write-data/scrape-data/manage-scrapers/create-a-scraper +influxdb/v2.5/tags: [scraper] +menu: + influxdb_2_5: + name: Create a scraper + parent: Manage scrapers +weight: 301 +related: + - /influxdb/v2.5/write-data/no-code/scrape-data/ + +--- + +InfluxDB scrapers collect data from specified targets at regular intervals, +then write the scraped data to an InfluxDB bucket. +Scrapers can collect data from any HTTP(S)-accessible endpoint that provides data +in the [Prometheus data format](https://prometheus.io/docs/instrumenting/exposition_formats/). + +## Create a scraper in the InfluxDB UI +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Scrapers**. + + {{< nav-icon "load data" >}} + +3. Click **{{< icon "plus" >}} Create Scraper**. +4. Enter a **Name** for the scraper. +5. Select a **Bucket** to store the scraped data. +6. Enter the **Target URL** to scrape. + The default URL value is `http://localhost:8086/metrics`, + which provides InfluxDB-specific metrics in the [Prometheus data format](https://prometheus.io/docs/instrumenting/exposition_formats/). +7. Click **Create**. + +The new scraper will begin scraping data after approximately 10 seconds, +then continue scraping in 10 second intervals. diff --git a/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/delete-a-scraper.md b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/delete-a-scraper.md new file mode 100644 index 000000000..a4306a8da --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/delete-a-scraper.md @@ -0,0 +1,23 @@ +--- +title: Delete an InfluxDB scraper +list_title: Delete a scraper +description: Delete an InfluxDB scraper in the InfluxDB user interface. 
+aliases: + - /influxdb/v2.5/collect-data/scrape-data/manage-scrapers/delete-a-scraper + - /influxdb/v2.5/write-data/scrape-data/manage-scrapers/delete-a-scraper +menu: + influxdb_2_5: + name: Delete a scraper + parent: Manage scrapers +weight: 303 +--- + +Delete a scraper from the InfluxDB user interface (UI). + +## Delete a scraper from the InfluxDB UI +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Scrapers**. + + {{< nav-icon "load data" >}} + +3. Hover over the scraper you want to delete and click the **{{< icon "delete" >}}** icon. +4. Click **Delete**. diff --git a/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/update-a-scraper.md b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/update-a-scraper.md new file mode 100644 index 000000000..f48546485 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/update-a-scraper.md @@ -0,0 +1,28 @@ +--- +title: Update an InfluxDB scraper +list_title: Update a scraper +description: Update an InfluxDB scraper that collects data from InfluxDB or a remote endpoint. +aliases: + - /influxdb/v2.5/collect-data/scrape-data/manage-scrapers/update-a-scraper + - /influxdb/v2.5/write-data/scrape-data/manage-scrapers/update-a-scraper +menu: + influxdb_2_5: + name: Update a scraper + parent: Manage scrapers +weight: 302 +--- + +Update a scraper in the InfluxDB user interface (UI). + +{{% note %}} +Scraper **Target URLs** and **Buckets** cannot be updated. +To modify either, [create a new scraper](/influxdb/v2.5/write-data/no-code/scrape-data/manage-scrapers/create-a-scraper). +{{% /note %}} + +## Update a scraper in the InfluxDB UI +1. In the navigation menu on the left, select **Data** (**Load Data**) > **Scrapers**. + + {{< nav-icon "load data" >}} + +3. Hover over the scraper you would like to update and click the **{{< icon "pencil" >}}** that appears next to the scraper name. +4. Enter a new name for the scraper. Press Return or click out of the name field to save the change. diff --git a/content/influxdb/v2.5/write-data/no-code/scrape-data/scrapable-endpoints.md b/content/influxdb/v2.5/write-data/no-code/scrape-data/scrapable-endpoints.md new file mode 100644 index 000000000..6b90b10b2 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/scrape-data/scrapable-endpoints.md @@ -0,0 +1,45 @@ +--- +title: Create scrapable endpoints +seotitle: Create scrapable endpoints for InfluxDB +description: > + InfluxDB scrapers can collect data from any HTTP(S)-accessible endpoint that + returns data in the Prometheus data format. + This article provides links to information about the Prometheus data format + and tools that generate Prometheus-formatted metrics. +aliases: + - /influxdb/v2.5/collect-data/scrape-data/scrapable-endpoints +menu: + influxdb_2_5: + parent: Scrape data +weight: 202 +influxdb/v2.5/tags: [scraper, prometheus] +--- + +InfluxDB scrapers can collect data from any HTTP(S)-accessible endpoint that returns data +in the [Prometheus data format](https://prometheus.io/docs/instrumenting/exposition_formats/). +The links below provide information about the Prometheus data format and tools +and clients that generate Prometheus-formatted metrics. + +## Prometheus Node Exporter +The [Prometheus Node Exporter](https://github.com/prometheus/node_exporter) exposes +a wide variety of hardware- and kernel-related metrics for **\*nix** systems. 
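+
+For example, a quick way to see what Prometheus-formatted output looks like is to
+request the exporter's metrics endpoint directly. The following sketch assumes Node
+Exporter is already running locally on its default port (`9100`); the metric names in
+the sample output vary by system:
+
+```sh
+# Request Prometheus-formatted metrics from a local Node Exporter
+curl http://localhost:9100/metrics
+
+# Sample of the returned Prometheus exposition format:
+#   # HELP node_cpu_seconds_total Seconds the CPUs spent in each mode.
+#   # TYPE node_cpu_seconds_total counter
+#   node_cpu_seconds_total{cpu="0",mode="idle"} 312.4
+```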
+ +##### Helpful links +[Monitoring linux host metrics with the Node Exporter](https://prometheus.io/docs/guides/node-exporter/) + +## Prometheus exporters and integrations +[Prometheus exporters and integrations](https://prometheus.io/docs/instrumenting/exporters/) +export Prometheus metrics from third-party systems or services. + +##### Helpful links +[List of third-party exporters](https://prometheus.io/docs/instrumenting/exporters/#third-party-exporters) +[Write a custom Prometheus exporter](https://prometheus.io/docs/instrumenting/writing_exporters/) + +## Prometheus client libraries +[Prometheus client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) +instrument applications for each of their respective languages. +Application metrics are output to an HTTP(S) endpoint where they can be scraped. + +##### Helpful links +[Instrumenting a Go application for Prometheus](https://prometheus.io/docs/guides/go-application/) +[Writing Prometheus client libraries](https://prometheus.io/docs/instrumenting/writing_clientlibs/) diff --git a/content/influxdb/v2.5/write-data/no-code/third-party.md b/content/influxdb/v2.5/write-data/no-code/third-party.md new file mode 100644 index 000000000..eff899322 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/third-party.md @@ -0,0 +1,92 @@ +--- +title: Write data with no-code third-party technologies +weight: 103 +description: > + Write data to InfluxDB using third-party technologies that do not require coding. +menu: + influxdb_2_5: + name: Third-party technologies + parent: No-code solutions +--- + + +A number of third-party technologies can be configured to send line protocol directly to InfluxDB. + + +If you're using any of the following technologies, check out the handy links below to configure these technologies to write data to InfluxDB (**no additional software to download or install**). + +{{% note %}} +Many third-party integrations are community contributions. If there's an integration missing from the list below, please [open a docs issue](https://github.com/influxdata/docs-v2/issues/new/choose) to let us know. +{{% /note %}} + +- (Write metrics and log events only) [Vector 0.9 or later](#configure-vector) + +- [Apache NiFi 1.8 or later](#configure-apache-nifi) + +- [OpenHAB 3.0 or later](#configure-openhab) + +- [Apache JMeter 5.2 or later](#configure-apache-jmeter) + +- [Apache Pulsar](#configure-apache-pulsar) + +- [FluentD 1.x or later](#configure-fluentd) + + +#### Configure Vector + +1. View the **Vector documentation**: + - For write metrics, [InfluxDB Metrics Sink](https://vector.dev/docs/reference/sinks/influxdb_metrics/) + - For log events, [InfluxDB Logs Sink](https://vector.dev/docs/reference/sinks/influxdb_logs/) +2. Under **Configuration**, click **v2** to view configuration settings. +3. Scroll down to **How It Works** for more detail: + - [InfluxDB Metrics Sink – How It Works ](https://vector.dev/docs/reference/sinks/influxdb_metrics/#how-it-works) + - [InfluxDB Logs Sink – How It Works](https://vector.dev/docs/reference/sinks/influxdb_logs/#how-it-works) + +#### Configure Apache NiFi + +See the _[InfluxDB Processors for Apache NiFi Readme](https://github.com/influxdata/nifi-influxdb-bundle#influxdb-processors-for-apache-nifi)_ for details. + +#### Configure OpenHAB + +See the _[InfluxDB Persistence Readme](https://github.com/openhab/openhab-addons/tree/master/bundles/org.openhab.persistence.influxdb)_ for details. 
+ +#### Configure Apache JMeter + + + +To configure Apache JMeter, complete the following steps in InfluxDB and JMeter. + +##### In InfluxDB + +1. [Find the name of your organization](/influxdb/v2.5/organizations/view-orgs/) (needed to create a bucket and token). +2. [Create a bucket using the influx CLI](/influxdb/v2.5/organizations/buckets/create-bucket/#create-a-bucket-using-the-influx-cli) and name it `jmeter`. +3. [Create a token](/influxdb/v2.5/security/tokens/create-token/). + +##### In JMeter + +1. Create a [Backend Listener](https://jmeter.apache.org/usermanual/component_reference.html#Backend_Listener) using the _**InfluxDBBackendListenerClient**_ implementation. +2. In the **Backend Listener implementation** field, enter: + ``` + org.apache.jmeter.visualizers.backend.influxdb.influxdbBackendListenerClient + ``` +3. Under **Parameters**, specify the following: + - **influxdbMetricsSender**: + ``` + org.apache.jmeter.visualizers.backend.influxdb.HttpMetricsSender + ``` + - **influxdbUrl**: _(include the bucket and org you created in InfluxDB)_ + ``` + http://localhost:8086/api/v2/write?org=my-org&bucket=jmeter + ``` + - **application**: `InfluxDB2` + - **influxdbToken**: _your InfluxDB API token_ + - Include additional parameters as needed. +4. Click **Add** to add the _**InfluxDBBackendListenerClient**_ implementation. + +#### Configure Apache Pulsar + +See _[InfluxDB sink connector](https://pulsar.apache.org/docs/en/io-influxdb-sink/)_ for details. + +#### Configure FluentD + +See the _[influxdb-plugin-fluent Readme](https://github.com/influxdata/influxdb-plugin-fluent)_ for details. diff --git a/content/influxdb/v2.5/write-data/no-code/use-telegraf/_index.md b/content/influxdb/v2.5/write-data/no-code/use-telegraf/_index.md new file mode 100644 index 000000000..b3f455f1b --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/use-telegraf/_index.md @@ -0,0 +1,38 @@ +--- +title: Use Telegraf to write data +seotitle: Use the Telegraf agent to collect and write data +list_title: Use the Telegraf agent +weight: 101 +description: > + Use Telegraf to collect and write data to InfluxDB v2.5. + Create Telegraf configurations in the InfluxDB UI or manually configure Telegraf. +aliases: + - /influxdb/v2.5/collect-data/advanced-telegraf + - /influxdb/v2.5/collect-data/use-telegraf + - /influxdb/v2.5/write-data/use-telegraf/ +menu: + influxdb_2_5: + name: Telegraf (agent) + parent: No-code solutions +--- + +[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is InfluxData's +data collection agent for collecting and reporting metrics. +Its vast library of input plugins and "plug-and-play" architecture lets you quickly +and easily collect metrics from many different sources. +This article describes how to use Telegraf to collect and store data in InfluxDB v2.5. + +For a list of available plugins, see [Telegraf plugins](/{{< latest "telegraf" >}}/plugins/). + +#### Requirements +- **Telegraf 1.9.2 or greater**. + _For information about installing Telegraf, see the + [Telegraf Installation instructions](/{{< latest "telegraf" >}}//install/)._ + +## Configure Telegraf +Telegraf input and output plugins are enabled and configured in Telegraf's configuration file (`telegraf.conf`). 
+You have the following options for configuring Telegraf: + +{{< children >}} + +{{< influxdbu "telegraf-102" >}} diff --git a/content/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config.md b/content/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config.md new file mode 100644 index 000000000..12e37a853 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config.md @@ -0,0 +1,136 @@ +--- +title: Automatically configure Telegraf +seotitle: Automatically configure Telegraf for InfluxDB v2.5 +description: > + Use the InfluxDB UI to automatically generate a Telegraf configuration, + then start Telegraf using the generated configuration file. +aliases: + - /influxdb/v2.5/collect-data/use-telegraf/auto-config + - /influxdb/v2.5/write-data/use-telegraf/auto-config +menu: + influxdb_2_5: + parent: Telegraf (agent) +weight: 201 +related: + - /influxdb/v2.5/telegraf-configs/create/ +--- + +The InfluxDB user interface (UI) can automatically create Telegraf configuration files based on user-selected Telegraf plugins. +This article describes how to create a Telegraf configuration in the InfluxDB UI and +start Telegraf using the generated configuration file. + +{{< youtube M8KP7FAb2L0 >}} + +{{% note %}} +_View the [requirements](/influxdb/v2.5/write-data/no-code/use-telegraf#requirements) +for using Telegraf with InfluxDB v2.5._ +{{% /note %}} + +## Create a Telegraf configuration + +1. Open the InfluxDB UI _(default: [localhost:8086](http://localhost:8086))_. +2. In the navigation menu on the left, select **Data** (**Load Data**) > **Telegraf**. + + {{< nav-icon "load data" >}} + +4. Click **{{< icon "plus" >}} Create Configuration**. +5. In the **Bucket** dropdown, select the bucket where Telegraf will store collected data. +6. Select one or more of the available plugin groups and click **Continue**. +7. Review the list of **Plugins to Configure** for configuration requirements. + Plugins listed with a {{< icon "check" >}} + require no additional configuration. + To configure a plugin or access plugin documentation, click the plugin name. +5. Provide a **Telegraf Configuration Name** and an optional **Telegraf Configuration Description**. +6. Adjust configuration settings as needed. To find configuration settings for a specific plugin, see [Telegraf plugins](/telegraf/latest/plugins/). +7. Click **Save and Test**. +8. The **Test Your Configuration** page provides instructions for how to start Telegraf using the generated configuration. + _See [Start Telegraf](#start-telegraf) below for detailed information about what each step does._ +9. Once Telegraf is running, click **Listen for Data** to confirm Telegraf is successfully sending data to InfluxDB. + Once confirmed, a **Connection Found!** message appears. +10. Click **Finish**. Your Telegraf configuration name and the associated bucket name appears in the list of Telegraf configurations. + + +### Windows + +If you plan to monitor a Windows host using the System plugin, you must complete the following steps. + +1. In the list of Telegraf configurations, double-click your + Telegraf configuration, and then click **Download Config**. +2. 
Open the downloaded Telegraf configuration file and replace the `[[inputs.processes]]` plugin with one of the following Windows plugins, depending on your Windows configuration: + + - [`[[inputs.win_perf_counters]]`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) + - [`[[inputs.win_services]]`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_services) + +3. Save the file and place it in a directory that **telegraf.exe** can access. + + +## Start Telegraf + +Requests to the [InfluxDB v2 API](/influxdb/v2.5/reference/api/) must include an API token. +A token identifies specific permissions to the InfluxDB instance. + +### Configure your token as an environment variable + +1. Find your API token. _For information about viewing tokens, see [View tokens](/influxdb/v2.5/security/tokens/view-tokens/)._ + +2. To configure your API token as the `INFLUX_TOKEN` environment variable, run the command appropriate for your operating system and command-line tool: + +{{< tabs-wrapper >}} +{{% tabs %}} +[macOS or Linux](#) +[Windows](#) +{{% /tabs %}} + +{{% tab-content %}} +```sh +export INFLUX_TOKEN=YourAuthenticationToken +``` +{{% /tab-content %}} + +{{% tab-content %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[PowerShell](#) +[CMD](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```sh +$env:INFLUX_TOKEN = "YourAuthenticationToken" +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```sh +set INFLUX_TOKEN=YourAuthenticationToken +# Make sure to include a space character at the end of this command. +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +### Start the Telegraf service + +Start the Telegraf service using the `-config` flag to specify the location of the generated Telegraf configuration file. + +- For Windows, the location is always a local file path. +- For Linux and macOS, the location can be a local file path or URL. + +Telegraf starts using the Telegraf configuration pulled from InfluxDB API. + +{{% note %}} +InfluxDB host URLs and ports differ between InfluxDB OSS and InfluxDB Cloud. +For the exact command, see the Telegraf configuration **Setup Instructions** in the InfluxDB UI. +{{% /note %}} + +```sh +telegraf -config http://localhost:8086/api/v2/telegrafs/0xoX00oOx0xoX00o +``` + +## Manage Telegraf configurations + +For more information about managing Telegraf configurations in InfluxDB, see +[Telegraf configurations](/influxdb/v2.5/telegraf-configs/). diff --git a/content/influxdb/v2.5/write-data/no-code/use-telegraf/dual-write.md b/content/influxdb/v2.5/write-data/no-code/use-telegraf/dual-write.md new file mode 100644 index 000000000..18a0f02eb --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/use-telegraf/dual-write.md @@ -0,0 +1,60 @@ +--- +title: Dual write to InfluxDB OSS and InfluxDB Cloud +description: Write data to both OSS and Cloud simultaneously. +menu: + influxdb_2_5: + parent: Telegraf (agent) +weight: 201 +--- + +If you want to back up your data in two places, or if you're migrating from OSS to Cloud, you may want to set up dual write. + +Use Telegraf to write to both InfluxDB OSS and InfluxDB Cloud simultaneously. + +The sample configuration below uses: + - The [InfluxDB v2 output plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb_v2) twice: first pointing to the OSS instance and then to the cloud instance. + - Two different tokens, one for OSS and one for Cloud. 
You'll need to configure both tokens as environment variables (see [Configure your token as an environment variable](/influxdb/v2.5/write-data/no-code/use-telegraf/auto-config/#configure-your-token-as-an-environment-variable). + +Use the configuration below to write your data to both OSS and Cloud instances simultaneously. + +## Sample configuration + +``` +[[inputs.cpu]] +[[inputs.mem]] + +## Any other inputs, processors, aggregators that you want to include in your configuration. + +# Send data to InfluxDB OSS v2 +[[outputs.influxdb_v2]] + ## The URLs of the InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + ## urls exp: http://127.0.0.1:9999 + urls = ["http://localhost:8086"] + + ## OSS token for authentication. + token = "$INFLUX_TOKEN_OSS" + + ## Organization is the name of the organization you want to write to. It must already exist. + organization = "influxdata" + + ## Destination bucket to write to. + bucket = "telegraf" + +# Send data to InfluxDB Cloud - AWS West cloud instance + [[outputs.influxdb_v2]] + ## The URLs of the InfluxDB Cloud instance. + + urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + + ## Cloud token for authentication. + token = "$INFLUX_TOKEN_CLOUD" + + ## Organization is the name of the organization you want to write to. It must already exist. + organization = "example@domain.com" + + ## Destination bucket to write into. + bucket = "telegraf" + ``` diff --git a/content/influxdb/v2.5/write-data/no-code/use-telegraf/manual-config.md b/content/influxdb/v2.5/write-data/no-code/use-telegraf/manual-config.md new file mode 100644 index 000000000..8af6982f3 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/use-telegraf/manual-config.md @@ -0,0 +1,161 @@ +--- +title: Manually configure Telegraf +seotitle: Manually configure Telegraf for InfluxDB v2.5 +description: > + Update existing or create new Telegraf configurations to use the `influxdb_v2` + output plugin to write to InfluxDB v2.5. + Start Telegraf using the custom configuration. +aliases: + - /influxdb/v2.5/collect-data/use-telegraf/manual-config + - /influxdb/v2.5/write-data/use-telegraf/manual-config +menu: + influxdb_2_5: + parent: Telegraf (agent) +weight: 202 +influxdb/v2.5/tags: [manually, plugin, mqtt] +related: + - /{{< latest "telegraf" >}}/plugins// + - /influxdb/v2.5/telegraf-configs/create/ + - /influxdb/v2.5/telegraf-configs/update/ +--- + +Use the Telegraf `influxdb_v2` output plugin to collect and write metrics into an InfluxDB v2.5 bucket. +This article describes how to enable the `influxdb_v2` output plugin in new and existing Telegraf configurations, +then start Telegraf using the custom configuration file. + +{{< youtube qFS2zANwIrc >}} + +{{% note %}} +_View the [requirements](/influxdb/v2.5/write-data/no-code/use-telegraf#requirements) +for using Telegraf with InfluxDB v2.5._ +{{% /note %}} + +## Configure Telegraf input and output plugins +Configure Telegraf input and output plugins in the Telegraf configuration file (typically named `telegraf.conf`). +Input plugins collect metrics. +Output plugins define destinations where metrics are sent. + +_See [Telegraf plugins](/{{< latest "telegraf" >}}/plugins//) for a complete list of available plugins._ + +### Manually add Telegraf plugins + +To manually add any of the available [Telegraf plugins](/{{< latest "telegraf" >}}/plugins//), follow the steps below. + +1. 
Find the plugin you want to enable from the complete list of available [Telegraf plugins](/{{< latest "telegraf" >}}/plugins//). +2. Click **View** to the right of the plugin name to open the plugin page on GitHub. For example, view the MQTT plugin GitHub page [here](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/inputs/mqtt_consumer/README.md). +3. Copy and paste the example configuration into your Telegraf configuration file (typically named `telegraf.conf`). + +### Enable and configure the InfluxDB v2 output plugin + +To send data to an InfluxDB v2.5 instance, enable in the +[`influxdb_v2` output plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md) +in the `telegraf.conf`. + +To find an example InfluxDB v2 output plugin configuration in the UI: + +1. In the navigation menu on the left, select **Data (Load Data)** > **Telegraf**. + + {{< nav-icon "load data" >}} + +2. Click **InfluxDB Output Plugin**. +3. Click **Copy to Clipboard** to copy the example configuration or **Download Config** to save a copy. +4. Paste the example configuration into your `telegraf.conf` and specify the options below. + +The InfluxDB output plugin configuration contains the following options: + +##### urls +An array of URLs for your InfluxDB v2.5 instances. +See [InfluxDB URLs](/influxdb/v2.5/reference/urls/) for information about which URLs to use. +**{{< cloud-name "short">}} requires HTTPS**. + +##### token +Your InfluxDB v2.5 authorization token. +For information about viewing tokens, see [View tokens](/influxdb/v2.5/security/tokens/view-tokens/). + +{{% note %}} +###### Avoid storing tokens in `telegraf.conf` +We recommend storing your tokens by setting the `INFLUX_TOKEN` environment variable and including the environment variable in your configuration file. + +{{< tabs-wrapper >}} +{{% tabs %}} +[macOS or Linux](#) +[Windows](#) +{{% /tabs %}} + +{{% tab-content %}} +```sh +export INFLUX_TOKEN=YourAuthenticationToken +``` +{{% /tab-content %}} + +{{% tab-content %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[PowerShell](#) +[CMD](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```sh +$env:INFLUX_TOKEN = "YourAuthenticationToken" +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```sh +set INFLUX_TOKEN=YourAuthenticationToken +# Make sure to include a space character at the end of this command. +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +_See the [example `telegraf.conf` below](#example-influxdb_v2-configuration)._ +{{% /note %}} + +##### organization +The name of the organization that owns the target bucket. + +##### bucket +The name of the bucket to write data to. + +#### Example influxdb_v2 configuration +The example below illustrates an `influxdb_v2` configuration. + +```toml +# ... + +[[outputs.influxdb_v2]] + urls = ["http://localhost:8086"] + token = "$INFLUX_TOKEN" + organization = "example-org" + bucket = "example-bucket" + +# ... +``` + +{{% note %}} +##### Write to InfluxDB v1.x and v2.5 +If a Telegraf agent is already writing to an InfluxDB v1.x database, +enabling the InfluxDB v2 output plugin will write data to both v1.x and v2.5 instances. 
+{{% /note %}} + +## Add a custom Telegraf configuration to InfluxDB +To add a custom or manually configured Telegraf configuration to your collection +of Telegraf configurations in InfluxDB, use the [`influx telegrafs create`](/influxdb/v2.5/reference/cli/influx/telegrafs/create/) +or [`influx telegrafs update`](/influxdb/v2.5/reference/cli/influx/telegrafs/update/) commands. +For more information, see: + +- [Create a Telegraf configuration](/influxdb/v2.5/telegraf-configs/create/#use-the-influx-cli) +- [Update a Telegraf configuration](/influxdb/v2.5/telegraf-configs/update/#use-the-influx-cli) + +## Start Telegraf + +Start the Telegraf service using the `--config` flag to specify the location of your `telegraf.conf`. + +```sh +telegraf --config /path/to/custom/telegraf.conf +``` diff --git a/content/influxdb/v2.5/write-data/no-code/use-telegraf/use-telegraf-plugins/_index.md b/content/influxdb/v2.5/write-data/no-code/use-telegraf/use-telegraf-plugins/_index.md new file mode 100644 index 000000000..c624177c3 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/use-telegraf/use-telegraf-plugins/_index.md @@ -0,0 +1,16 @@ +--- +title: Use Telegraf plugins +description: > + Use Telegraf plugins to capture and write metrics to InfluxDB. +aliases: + - /influxdb/v2.5/write-data/use-telegraf/use-telegraf-plugins/ +menu: + influxdb_2_5: + name: Use Telegraf plugins + parent: Telegraf (agent) +weight: 202 +--- + +The following articles guide you through step-by-step Telegraf configuration examples: + +{{< children >}} diff --git a/content/influxdb/v2.5/write-data/no-code/use-telegraf/use-telegraf-plugins/use-http-plugin.md b/content/influxdb/v2.5/write-data/no-code/use-telegraf/use-telegraf-plugins/use-http-plugin.md new file mode 100644 index 000000000..9cb039ff4 --- /dev/null +++ b/content/influxdb/v2.5/write-data/no-code/use-telegraf/use-telegraf-plugins/use-http-plugin.md @@ -0,0 +1,119 @@ +--- +title: Use the HTTP input plugin +seotitle: Use the Telegraf HTTP input plugin to write data to InfluxDB +list_title: HTTP input plugin +description: > + Write Citi Bike data to your InfluxDB instance with the HTTP plugin. +aliases: + - /influxdb/v2.5/write-data/use-telegraf/use-telegraf-plugins/use-http-plugin/ +menu: + influxdb_2_5: + name: HTTP input plugin + parent: Use Telegraf plugins +weight: 202 +--- + +This example walks through using the Telegraf HTTP input plugin to collect live metrics on Citi Bike stations in New York City. Live station data is available in JSON format from [NYC OpenData](https://data.cityofnewyork.us/NYC-BigApps/Citi-Bike-Live-Station-Feed-JSON-/p94q-8hxh). + +Configure [`influxdb` output plugin](/{{< latest "telegraf" >}}/plugins//#influxdb) to write metrics to your InfluxDB {{< current-version >}} instance. + +## Configure the HTTP Input plugin in your Telegraf configuration file + +To retrieve data from the Citi Bike URL endpoint, enable the `inputs.http` input plugin in your Telegraf configuration file. + +Specify the following options: + +### `urls` +One or more URLs to read metrics from. For this example, use `https://gbfs.citibikenyc.com/gbfs/en/station_status.json`. + +### `data_format` +The format of the data in the HTTP endpoints that Telegraf will ingest. For this example, use JSON. + + +## Add parser information to your Telegraf configuration + +Specify the following JSON-specific options. 
+
+### JSON
+
+#### `json_query`
+To parse only the relevant portion of JSON data, set the `json_query` option with a [GJSON](https://github.com/tidwall/gjson) path. The result of the query should contain a JSON object or an array of objects.
+In this case, we don't want to parse the JSON query's `executionTime` at the beginning of the data, so we'll limit this to include only the data in the `stationBeanList` array.
+
+#### `tag_keys`
+List of one or more JSON keys that should be added as tags. For this example, we'll use the tag keys `id`, `stationName`, `city`, and `postalCode`.
+
+#### `json_string_fields`
+List the keys of fields that are in string format so that they can be parsed as strings. Here, the string fields are `statusValue`, `stAddress1`, `stAddress2`, `location`, and `landMark`.
+
+#### `json_time_key`
+Key from the JSON file that creates the timestamp metric. In this case, we want to use the time that station data was last reported, or the `lastCommunicationTime`. If you don't specify a key, the time that Telegraf reads the data becomes the timestamp.
+
+#### `json_time_format`
+The format used to interpret the designated `json_time_key`. This example uses [Go reference time format](https://golang.org/pkg/time/#Time.Format). For example, `Mon Jan 2 15:04:05 MST 2006`.
+
+#### `json_timezone`
+The timezone. We'll set this to the Unix TZ value for the location where our bike data originates, `America/New_York`.
+
+#### Example configuration
+
+```toml
+[[inputs.http]]
+  # URL for NYC's Citi Bike station data in JSON format
+  urls = ["https://feeds.citibikenyc.com/stations/stations.json"]
+
+  # Overwrite measurement name from default `http` to `citibikenyc`
+  name_override = "citibikenyc"
+
+  # Exclude url and host items from tags
+  tagexclude = ["url", "host"]
+
+  # Data from HTTP in JSON format
+  data_format = "json"
+
+  # Parse `stationBeanList` array only
+  json_query = "stationBeanList"
+
+  # Set station metadata as tags
+  tag_keys = ["id", "stationName", "city", "postalCode"]
+
+  # Do not include station landmark data as fields
+  fielddrop = ["landMark"]
+
+  # JSON values to set as string fields
+  json_string_fields = ["statusValue", "stAddress1", "stAddress2", "location", "landMark"]
+
+  # Latest station information reported at `lastCommunicationTime`
+  json_time_key = "lastCommunicationTime"
+
+  # Time is reported in Golang "reference time" format
+  json_time_format = "2006-01-02 03:04:05 PM"
+
+  # Time is reported in Eastern Standard Time (EST)
+  json_timezone = "America/New_York"
+```
+
+## Start Telegraf and verify data appears
+
+[Start the Telegraf service](/telegraf/v1.14/introduction/getting-started/). 
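+
+For example, assuming you saved the configuration above as `~/telegraf.conf`, the
+following sketch starts Telegraf with it:
+
+```sh
+# Start Telegraf with the Citi Bike configuration file
+telegraf --config ~/telegraf.conf
+```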
+ +To test that the data is being sent to InfluxDB, run the following (replacing `telegraf.conf` with the path to your configuration file): + +``` +telegraf -config ~/telegraf.conf -test +``` + +This command should return line protocol that looks similar to the following: + + +``` +citibikenyc,id=3443,stationName=W\ 52\ St\ &\ 6\ Ave statusKey=1,location="",totalDocks=41,availableDocks=32,latitude=40.76132983124814,longitude=-73.97982001304626,availableBikes=8,stAddress2="",stAddress1="W 52 St & 6 Ave",statusValue="In Service" 1581533519000000000 +citibikenyc,id=367,stationName=E\ 53\ St\ &\ Lexington\ Ave availableBikes=8,stAddress1="E 53 St & Lexington Ave",longitude=-73.97069431,latitude=40.75828065,stAddress2="",statusKey=1,location="",statusValue="In Service",totalDocks=34,availableDocks=24 1581533492000000000 +citibikenyc,id=359,stationName=E\ 47\ St\ &\ Park\ Ave totalDocks=64,availableBikes=15,statusValue="In Service",location="",latitude=40.75510267,availableDocks=49,stAddress1="E 47 St & Park Ave",longitude=-73.97498696,statusKey=1,stAddress2="" 1581533535000000000 +citibikenyc,id=304,stationName=Broadway\ &\ Battery\ Pl statusValue="In Service",availableDocks=11,stAddress1="Broadway & Battery Pl",statusKey=1,stAddress2="",location="",totalDocks=33,latitude=40.70463334,longitude=-74.01361706,availableBikes=22 1581533499000000000 +``` + +Now, you can explore and query the Citi Bike data in InfluxDB. diff --git a/content/influxdb/v2.5/write-data/oss-to-cloud.md b/content/influxdb/v2.5/write-data/oss-to-cloud.md new file mode 100644 index 000000000..fddf0d471 --- /dev/null +++ b/content/influxdb/v2.5/write-data/oss-to-cloud.md @@ -0,0 +1,230 @@ +--- +title: Write data from InfluxDB OSS to InfluxDB Cloud +description: > + Use `to()` or `experimental.to()` to write data from InfluxDB OSS to InfluxDB Cloud. + Selectively write data or process data before writing it to InfluxDB Cloud. +menu: + influxdb_2_5: + name: Write from OSS to Cloud + parent: Write data +weight: 105 +influxdb/v2.5/tags: [write] +--- + +To write data from InfluxDB OSS to InfluxDB Cloud, use the Flux +[`to()`](/flux/v0.x/stdlib/influxdata/influxdb/to/) or +[`experimental.to()`](/flux/v0.x/stdlib/experimental/to/) functions. +Write data once with a single query execution or use [InfluxDB tasks](/influxdb/v2.5/process-data/) +to [routinely write data to InfluxDB Cloud](#automate-writing-data-from-influxdb-oss-to-influxdb-cloud). + +{{% note %}} +#### Replicate writes to InfluxDB OSS to InfluxDB Cloud +To replicate all writes to an InfluxDB OSS instance to an InfluxDB Cloud instance, +use [InfluxDB replication streams](/influxdb/v2.5/write-data/replication/). +{{% /note %}} + +{{% cloud %}} +#### InfluxDB Cloud rate limits +Write requests to InfluxDB Cloud are subject to the rate limits associated with your +[InfluxDB Cloud pricing plan](/influxdb/cloud/account-management/pricing-plans/). +{{% /cloud %}} + +1. Query data from InfluxDB OSS. +2. _(Optional)_ [Filter](/{{% latest "flux" %}}/stdlib/universe/filter/) or process data to write to InfluxDB Cloud. +3. Use `to` or `experimental.to` to write data to InfluxDB Cloud. + For most use cases, `to()` is the correct function to use, but depending on + the structure of the data you're writing, `experimental.to` may be required. + + **Use the following guidelines**: + + - **to()**: Use to write data in field keys to the `_field` column and field values to the `_value` column. 
+ + - **experimental.to()**: Use to write data in column names to corresponding field keys and column values to field values. + + _See [input and output examples for `to()` functions](#input-and-output-data-for-to-functions)._ + +4. Provide the following parameters to either function: + + - **bucket**: InfluxDB Cloud bucket to write to + - **host**: InfluxDB Cloud region URL + - **org**: InfluxDB Cloud organization + - **token**: InfluxDB Cloud API Token + +5. ({{< req "Recommended" >}}) To keep your raw API token out of queries, store + your InfluxDB Cloud API token as an [InfluxDB secret](/influxdb/v2.5/security/secrets/) + in your InfluxDB OSS instance and use [`secrets.get()`](/flux/v0.x/stdlib/influxdata/influxdb/secrets/get/) + to retrieve the secret value as shown in the following example + (select the function you're using to see the correct format): + + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[to()](#) +[experimental.to()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```js +import "influxdata/influxdb/secrets" + +cloudToken = secrets.get(key: "INFLUX_CLOUD_API_TOKEN") + +from(bucket: "example-oss-bucket") + |> range(start: -10m) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> to( + bucket: "example-cloud-bucket", + host: "https://cloud2.influxdata.com", + org: "example-org", + token: cloudToken, + ) +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```js +import "experimental" +import "influxdata/influxdb/secrets" + +cloudToken = secrets.get(key: "INFLUX_CLOUD_API_TOKEN") + +from(bucket: "example-oss-bucket") + |> range(start: -10m) + |> filter(fn: (r) => r._measurement == "example-measurement") + |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") + |> experimental.to( + bucket: "example-cloud-bucket", + host: "https://cloud2.influxdata.com", + org: "example-org", + token: cloudToken, + ) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +## Input and output data for to() functions + +{{< tabs-wrapper >}} +{{% tabs %}} +[to()](#) +[experimental.to()](#) +{{% /tabs %}} +{{% tab-content %}} + +- `to()` requires `_time`, `_measurement`, `_field`, and `_value` columns. +- `to()` writes all other columns as tags where the column name is the tag key + and the column value is the tag value. + +#### Input data +| _time | _measurement | exampleTag | _field | _value | +| :------------------- | :----------- | :--------: | :----- | -----: | +| 2021-01-01T00:00:00Z | example-m | A | temp | 80.0 | +| 2021-01-01T00:01:00Z | example-m | A | temp | 80.3 | +| 2021-01-01T00:02:00Z | example-m | A | temp | 81.1 | + +| _time | _measurement | exampleTag | _field | _value | +| :------------------- | :----------- | :--------: | :----- | -----: | +| 2021-01-01T00:00:00Z | example-m | A | rpm | 4023 | +| 2021-01-01T00:01:00Z | example-m | A | rpm | 4542 | +| 2021-01-01T00:02:00Z | example-m | A | rpm | 4901 | + +#### Output line protocol +``` +example-m,exampleTag=A temp=80.0,rpm=4023i 1609459200000000000 +example-m,exampleTag=A temp=80.3,rpm=4542i 1609459260000000000 +example-m,exampleTag=A temp=81.1,rpm=4901i 1609459320000000000 +``` +{{% /tab-content %}} + +{{% tab-content %}} +- `experimental.to()` requires `_time` and `_measurement` columns. +- Columns **in** the [group key](/flux/v0.x/get-started/data-model/#grouop-key) + (other than `_measurement`) are parsed as tags where the column name is the + tag key and the column value is the tag value. 
+- Columns **not in** the group key (other than `_time`) are parsed as fields
+  where the column name is the field key and the column value is the field value.
+
+#### Input data {id="experimental-input-data"}
+{{< flux/group-key "[_measurement, exampleTag]">}}
+| _time                | _measurement | exampleTag | temp |  rpm |
+| :------------------- | :----------- | :--------: | ---: | ---: |
+| 2021-01-01T00:00:00Z | example-m    |     A      | 80.0 | 4023 |
+| 2021-01-01T00:01:00Z | example-m    |     A      | 80.3 | 4542 |
+| 2021-01-01T00:02:00Z | example-m    |     A      | 81.1 | 4901 |
+
+#### Output line protocol {id="experimental-output-line-protocol"}
+```
+example-m,exampleTag=A temp=80.0,rpm=4023i 1609459200000000000
+example-m,exampleTag=A temp=80.3,rpm=4542i 1609459260000000000
+example-m,exampleTag=A temp=81.1,rpm=4901i 1609459320000000000
+```
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+## Examples
+
+- [Downsample and write data to InfluxDB Cloud](#downsample-and-write-data-to-influxdb-cloud)
+- [Write min, max, and mean values to InfluxDB Cloud](#write-min-max-and-mean-values-to-influxdb-cloud)
+
+#### Downsample and write data to InfluxDB Cloud
+```js
+import "influxdata/influxdb/secrets"
+
+cloudToken = secrets.get(key: "INFLUX_CLOUD_API_TOKEN")
+
+from(bucket: "example-oss-bucket")
+    |> range(start: -10m)
+    |> filter(fn: (r) => r._measurement == "example-measurement")
+    |> aggregateWindow(every: 1m, fn: last)
+    |> to(
+        bucket: "example-cloud-bucket",
+        host: "https://cloud2.influxdata.com",
+        org: "example-org",
+        token: cloudToken,
+    )
+```
+
+#### Write min, max, and mean values to InfluxDB Cloud
+```js
+import "influxdata/influxdb/secrets"
+
+cloudToken = secrets.get(key: "INFLUX_CLOUD_API_TOKEN")
+
+data = from(bucket: "example-oss-bucket")
+    |> range(start: -30m)
+    |> filter(fn: (r) => r._measurement == "example-measurement")
+
+min = data |> aggregateWindow(every: 10m, fn: min) |> map(fn: (r) => ({ r with _field: "${r._field}_min" }))
+max = data |> aggregateWindow(every: 10m, fn: max) |> map(fn: (r) => ({ r with _field: "${r._field}_max" }))
+mean = data |> aggregateWindow(every: 10m, fn: mean) |> map(fn: (r) => ({ r with _field: "${r._field}_mean" }))
+
+union(tables: [min, max, mean])
+    |> to(
+        bucket: "example-cloud-bucket",
+        host: "https://cloud2.influxdata.com",
+        org: "example-org",
+        token: cloudToken,
+    )
+```
+
+## Automate writing data from InfluxDB OSS to InfluxDB Cloud
+To automatically and routinely write data from InfluxDB OSS to InfluxDB Cloud,
+[create a task](/influxdb/v2.5/process-data/manage-tasks/create-task/) in your
+InfluxDB OSS instance that regularly queries, processes, and writes data to
+InfluxDB Cloud.
+
+```js
+import "influxdata/influxdb/tasks"
+import "influxdata/influxdb/secrets"
+
+option task = {name: "Downsample to InfluxDB Cloud", every: 1h}
+
+// Retrieve the InfluxDB Cloud API token stored as an InfluxDB secret
+cloudToken = secrets.get(key: "INFLUX_CLOUD_API_TOKEN")
+
+from(bucket: "example-oss-bucket")
+    |> range(start: -10m)
+    |> filter(fn: (r) => r._measurement == "example-measurement")
+    |> aggregateWindow(every: 1m, fn: last)
+    |> to(
+        bucket: "example-cloud-bucket",
+        host: "https://cloud2.influxdata.com",
+        org: "example-org",
+        token: cloudToken,
+    )
+```
+
diff --git a/content/influxdb/v2.5/write-data/replication/_index.md b/content/influxdb/v2.5/write-data/replication/_index.md
new file mode 100644
index 000000000..d954b75e9
--- /dev/null
+++ b/content/influxdb/v2.5/write-data/replication/_index.md
@@ -0,0 +1,22 @@
+---
+title: Edge Data Replication
+seotitle: InfluxDB Edge Data Replication
+description: >
+  Use InfluxDB Edge Data Replication to replicate local data at the edge to InfluxDB Cloud. 
+weight: 106 +menu: + influxdb_2_5: + name: Edge data replication + parent: Write data +--- + +Running [InfluxDB OSS](/influxdb/v2.5/install/) at the edge lets you collect, process, transform, and analyze high-precision data locally. +**Edge Data Replication** lets you replicate data from distributed edge environments to [InfluxDB Cloud](/influxdb/cloud/sign-up/), aggregating and storing data for long-term management and analysis. + +{{< youtube qsj_TTpDyf4 >}} + +{{% note %}} +While replicating data from InfluxDB OSS to InfluxDB Cloud is the most common use case, you may also replicate data from any InfluxDB bucket to a bucket in another InfluxDB instance, for example, InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise. +{{% /note %}} + +{{< children >}} diff --git a/content/influxdb/v2.5/write-data/replication/replicate-data.md b/content/influxdb/v2.5/write-data/replication/replicate-data.md new file mode 100644 index 000000000..46d0a75ad --- /dev/null +++ b/content/influxdb/v2.5/write-data/replication/replicate-data.md @@ -0,0 +1,285 @@ +--- +title: Replicate data from InfluxDB OSS +weight: 106 +description: > + Replicate data from select InfluxDB OSS buckets to remote buckets in InfluxDB + Cloud, InfluxDB OSS, or InfluxDB Enterprise instances. +menu: + influxdb_2_5: + name: Replicate data + parent: Edge data replication +influxdb/v2.5/tags: [write, replication] +related: + - /influxdb/v2.5/reference/cli/influx/remote + - /influxdb/v2.5/reference/cli/influx/replication +--- + +Use InfluxDB replication streams (InfluxDB Edge Data Replication) to replicate +the incoming data of select buckets to one or more buckets on a remote +InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise instance. + +Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise. + +## Configure a replication stream + +Use the [`influx` CLI](/influxdb/v2.5/tools/influx-cli/) or the +[InfluxDB {{< current-version >}} API](/influxdb/v2.5/reference/api/) to configure +a replication stream. + +{{% note %}} +To replicate data to InfluxDB OSS or InfluxDB Enterprise, adjust the +remote connection values accordingly. +{{% /note %}} + +{{< tabs-wrapper >}} +{{% tabs %}} +[CLI](#) +[API](#) +{{% /tabs %}} +{{% tab-content %}} + + + +1. In your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS instance, use + the `influx remote create` command to create a remote connection to replicate data to. + + **Provide the following:** + + - Remote connection name + - {{% oss-only %}}Remote InfluxDB instance URL{{% /oss-only %}} + - {{% oss-only %}}Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /oss-only %}} + - {{% oss-only %}}Remote InfluxDB organization ID{{% /oss-only %}} + - {{% cloud-only %}}[InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /cloud-only %}} + - {{% cloud-only %}}InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /cloud-only %}} + - {{% cloud-only %}}InfluxDB Cloud organization ID{{% /cloud-only %}} + + ```sh + influx remote create \ + --name example-remote-name \ + --remote-url https://cloud2.influxdata.com \ + --remote-api-token mYsuP3r5Ecr37t0k3n \ + --remote-org-id 00xoXXoxXX00 + ``` + + If you already have remote InfluxDB connections configured, you can use an existing connection. To view existing connections, run `influx remote list`. + +2. In your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS instance, use the + `influx replication create` command to create a replication stream. 
+
+    **Provide the following:**
+
+    - Replication stream name
+    - {{% oss-only %}}Remote connection ID{{% /oss-only %}}
+    - {{% oss-only %}}Local bucket ID to replicate writes from{{% /oss-only %}}
+    - {{% oss-only %}}Remote bucket name or ID to replicate writes to. If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /oss-only %}}
+    - {{% cloud-only %}}Remote connection ID{{% /cloud-only %}}
+    - {{% cloud-only %}}InfluxDB OSS bucket ID to replicate writes from{{% /cloud-only %}}
+    - {{% cloud-only %}}InfluxDB Cloud bucket ID to replicate writes to{{% /cloud-only %}}
+
+    ```sh
+    influx replication create \
+      --name REPLICATION_STREAM_NAME \
+      --remote-id REPLICATION_REMOTE_ID \
+      --local-bucket-id INFLUX_BUCKET_ID \
+      --remote-bucket REMOTE_INFLUX_BUCKET_NAME
+    ```
+
+Once a replication stream is created, InfluxDB {{% oss-only %}}OSS{{% /oss-only %}}
+will replicate all writes from the specified bucket to the {{% oss-only %}}remote {{% /oss-only %}}
+InfluxDB {{% cloud-only %}}Cloud {{% /cloud-only %}}bucket.
+Use the `influx replication list` command to view information such as the current queue size,
+max queue size, and latest status code.
+
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+
+1. Send a `POST` request to your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS `/api/v2/remotes` endpoint to create a remote connection to replicate data to.
+
+    {{< keep-url >}}
+
+    {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" >}}
+
+    Include the following in your request:
+
+    - **Request method:** `POST`
+    - **Headers:**
+      - **Authorization:** `Token` scheme with your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS [API token](/influxdb/v2.5/security/tokens/)
+      - **Content-type:** `application/json`
+    - **Request body:** JSON object with the following fields:
+      {{< req type="key" >}}
+      - {{< req "\*" >}} **allowInsecureTLS:** Allow insecure TLS connections
+      - **description:** Remote description
+      - {{< req "\*" >}} **name:** Remote connection name
+      - {{< req "\*" >}} **orgID:** {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS organization ID
+      - {{% oss-only %}}{{< req "\*" >}} **remoteAPIToken:** Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /oss-only %}}
+      - {{% oss-only %}}{{< req "\*" >}} **remoteOrgID:** Remote InfluxDB organization ID{{% /oss-only %}}
+      - {{% oss-only %}}{{< req "\*" >}} **remoteURL:** Remote InfluxDB instance URL{{% /oss-only %}}
+      - {{% cloud-only %}}{{< req "\*" >}} **remoteAPIToken:** InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /cloud-only %}}
+      - {{% cloud-only %}}{{< req "\*" >}} **remoteOrgID:** InfluxDB Cloud organization ID{{% /cloud-only %}}
+      - {{% cloud-only %}}{{< req "\*" >}} **remoteURL:** [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /cloud-only %}}
+
+    {{< keep-url >}}
+    ```sh
+    curl --request POST http://localhost:8086/api/v2/remotes \
+      --header 'Authorization: Token INFLUX_OSS_TOKEN' \
+      --data '{
+        "allowInsecureTLS": false,
+        "description": "Example remote description",
+        "name": "Example remote name",
+        "orgID": "INFLUX_OSS_ORG_ID",
+        "remoteAPIToken": "REMOTE_INFLUX_TOKEN",
+        "remoteOrgID": "REMOTE_INFLUX_ORG_ID",
+        "remoteURL": "https://cloud2.influxdata.com"
+      }'
+    ```
+
+    If you already have remote InfluxDB connections configured, you can use an
+    existing connection. To view existing connections, use the `/api/v2/remotes`
+    endpoint with the `GET` request method.
+
+    {{< keep-url >}}
+
+    {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" >}}
+
+    Include the following in your request:
+
+    - **Request method:** `GET`
+    - **Headers:**
+      - **Authorization:** `Token` scheme with your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS [API token](/influxdb/v2.5/security/tokens/)
+    - **Query parameters:**
+      - **orgID:** {{% oss-only %}}Local{{% /oss-only %}} InfluxDB OSS organization ID
+
+    {{< keep-url >}}
+    ```sh
+    curl --request GET \
+      http://localhost:8086/api/v2/remotes?orgID=INFLUX_OSS_ORG_ID \
+      --header 'Authorization: Token INFLUX_OSS_TOKEN'
+    ```
+
+2. Send a `POST` request to your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS
+    `/api/v2/replications` endpoint to create a replication stream.
+
+    {{< keep-url >}}
+
+    {{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="POST" >}}
+
+    Include the following in your request:
+
+    - **Request method:** `POST`
+    - **Headers:**
+      - **Authorization:** `Token` scheme with your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS [API token](/influxdb/v2.5/security/tokens/)
+      - **Content-type:** `application/json`
+    - **Request body:** JSON object with the following fields:
+      {{< req type="key" >}}
+      - **dropNonRetryableData:** Drop data when a non-retryable error is encountered.
+      - {{< req "\*" >}} **localBucketID:** {{% oss-only %}}Local{{% /oss-only %}} InfluxDB OSS bucket ID to replicate writes from.
+      - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`).
+      - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`).
+      - {{< req "\*" >}} **name:** Replication stream name.
+      - {{< req "\*" >}} **orgID:** {{% oss-only %}}Local{{% /oss-only %}} InfluxDB OSS organization ID.
+      - {{% oss-only %}}{{< req "\*" >}} **remoteBucketID:** Remote bucket ID to replicate writes to.{{% /oss-only %}}
+      - {{% oss-only %}}{{< req "\*" >}} **remoteBucketName:** Remote bucket name to replicate writes to. If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /oss-only %}}
+      - {{% cloud-only %}}{{< req "\*" >}} **remoteBucketID:** InfluxDB Cloud bucket ID to replicate writes to.{{% /cloud-only %}}
+      - {{% cloud-only %}}{{< req "\*" >}} **remoteBucketName:** InfluxDB Cloud bucket name to replicate writes to.{{% /cloud-only %}}
+      - {{< req "\*" >}} **remoteID:** Remote connection ID
+
+    {{% note %}}
+`remoteBucketID` and `remoteBucketName` are mutually exclusive.
+{{% oss-only %}}If replicating to **InfluxDB Enterprise**, use `remoteBucketName` with the `db-name/rp-name` bucket name syntax.{{% /oss-only %}}
+    {{% /note %}}
+
+{{< keep-url >}}
+```sh
+curl --request POST http://localhost:8086/api/v2/replications \
+  --header 'Authorization: Token INFLUX_OSS_TOKEN' \
+  --data '{
+    "dropNonRetryableData": false,
+    "localBucketID": "INFLUX_OSS_BUCKET_ID",
+    "maxAgeSeconds": 604800,
+    "maxQueueSizeBytes": 67108860,
+    "name": "Example replication stream name",
+    "orgID": "INFLUX_OSS_ORG_ID",
+    "remoteBucketName": "REMOTE_INFLUX_BUCKET_NAME",
+    "remoteID": "REMOTE_ID"
+  }'
+```
+
+Once a replication stream is created, InfluxDB {{% oss-only %}}OSS{{% /oss-only %}}
+will replicate all writes from the specified local bucket to the {{% oss-only %}}remote {{% /oss-only %}}
+InfluxDB {{% cloud-only %}}Cloud {{% /cloud-only %}}bucket.
+To get
+information such as the current queue size, max queue size, and latest status
+code for each replication stream, send a `GET` request to your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS `/api/v2/replications` endpoint.
+
+{{< keep-url >}}
+
+{{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="GET" >}}
+
+Include the following in your request:
+
+- **Request method:** `GET`
+- **Headers:**
+  - **Authorization:** `Token` scheme with your {{% oss-only %}}local{{% /oss-only %}} InfluxDB OSS [API token](/influxdb/v2.5/security/tokens/)
+- **Query parameters:**
+  - **orgID:** {{% oss-only %}}Local{{% /oss-only %}} InfluxDB OSS organization ID
+
+{{< keep-url >}}
+```sh
+curl --request GET \
+  http://localhost:8086/api/v2/replications?orgID=INFLUX_OSS_ORG_ID \
+  --header 'Authorization: Token INFLUX_OSS_TOKEN'
+```
+
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+{{% note %}}
+#### Important things to note
+
+- Only write operations are replicated. Other data operations (like deletes or restores) are not replicated.
+- In InfluxDB OSS, large write request bodies are written entirely.
+  When replicated, write requests are sent to the remote bucket in batches.
+  The maximum batch size is 500 kB (typically between 250 and 500 lines of line protocol).
+  This may result in scenarios where some batches succeed and others fail.
+{{% /note %}}

+## Replicate downsampled or processed data
+In some cases, you may not want to write raw, high-precision data to a remote InfluxDB {{% cloud-only %}}Cloud {{% /cloud-only %}}instance. To replicate only downsampled or processed data:
+
+1. Create a bucket in your InfluxDB OSS instance to store downsampled or processed data in.
+2. Create an InfluxDB task that downsamples or processes data and stores it in the new bucket. For example:
+
+    ```js
+    import "influxdata/influxdb/tasks"
+    import "types"
+
+    // omit this line if adding task via the UI
+    option task = {name: "Downsample raw data", every: 10m}
+
+    data = () => from(bucket: "example-bucket")
+        |> range(start: tasks.lastSuccess(orTime: -task.every))
+
+    numeric = data()
+        |> filter(fn: (r) => types.isType(v: r._value, type: "float") or types.isType(v: r._value, type: "int") or types.isType(v: r._value, type: "uint"))
+        |> aggregateWindow(every: task.every, fn: mean)
+
+    nonNumeric = data()
+        |> filter(fn: (r) => types.isType(v: r._value, type: "string") or types.isType(v: r._value, type: "bool"))
+        |> aggregateWindow(every: task.every, fn: last)
+
+    union(tables: [numeric, nonNumeric])
+        |> to(bucket: "example-downsampled-bucket")
+    ```
+
+3. [Create a replication stream](#configure-a-replication-stream) to replicate data from the downsampled bucket to the remote InfluxDB {{% cloud-only %}}Cloud {{% /cloud-only %}}instance.
diff --git a/content/influxdb/v2.5/write-data/troubleshoot.md b/content/influxdb/v2.5/write-data/troubleshoot.md
new file mode 100644
index 000000000..cf1a978be
--- /dev/null
+++ b/content/influxdb/v2.5/write-data/troubleshoot.md
@@ -0,0 +1,324 @@
+---
+title: Troubleshoot issues writing data
+seotitle: Troubleshoot issues writing data to InfluxDB
+weight: 106
+description: >
+  Troubleshoot issues writing data. Find response codes for failed writes. Discover how writes fail, from exceeding rate or payload limits, to syntax errors and schema conflicts.
+menu: + influxdb_2_5: + name: Troubleshoot issues + parent: Write data +influxdb/v2.5/tags: [write, line protocol, errors] +related: + - /influxdb/v2.5/api/#tag/Write, InfluxDB API /write endpoint + - /influxdb/v2.5/reference/internals + - /influxdb/v2.5/reference/cli/influx/write +--- +Learn how to avoid unexpected results and recover from errors when writing to InfluxDB. + +{{% oss-only %}} + +- [Handle `write` and `delete` responses](#handle-write-and-delete-responses) +- [Troubleshoot failures](#troubleshoot-failures) + +{{% /oss-only %}} + +{{% cloud-only %}} + +- [Handle `write` and `delete` responses](#handle-write-and-delete-responses) +- [Troubleshoot failures](#troubleshoot-failures) +- [Troubleshoot rejected points](#troubleshoot-rejected-points) + +{{% /cloud-only %}} + +## Handle `write` and `delete` responses + +{{% cloud-only %}} + +In InfluxDB Cloud, writes and deletes are asynchronous and eventually consistent. +Once InfluxDB validates your request and [queues](/influxdb/cloud/reference/internals/durability/#backup-on-write) the write or delete, it sends a _success_ response (HTTP `204` status code) as an acknowledgement. +To ensure that InfluxDB handles writes and deletes in the order you request them, wait for the acknowledgement before you send the next request. +Because writes are asynchronous, keep the following in mind: + +- Data might not yet be queryable when you receive _success_ (HTTP `204` status code). +- InfluxDB may still reject points after you receive _success_ (HTTP `204` status code). + +{{% /cloud-only %}} + +{{% oss-only %}} + +If InfluxDB OSS successfully writes all the request data to the bucket, InfluxDB returns _success_ (HTTP `204` status code). +The first rejected point in a batch causes InfluxDB to reject the entire batch and respond with an [HTTP error status](#review-http-status-codes). + +{{% /oss-only %}} + +### Review HTTP status codes + +InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. +Write requests return the following status codes: + +{{% cloud-only %}} + +| HTTP response code | Message | Description | +| :-------------------------------| :--------------------------------------------------------------- | :------------- | +| `204 "Success"` | | If InfluxDB validated the request data format and queued the data for writing to the bucket | +| `400 "Bad request"` | `message` contains the first malformed line | If data is malformed | +| `401 "Unauthorized"` | | If the [`Authorization: Token` header](/influxdb/cloud/api-guide/api_intro/#authentication) is missing or malformed or if the [API token](/influxdb/cloud/api-guide/api_intro/#authentication) doesn't have [permission](/influxdb/cloud/security/tokens/) to write to the bucket | +| `404 "Not found"` | requested **resource type**, e.g. "organization", and **resource name** | If a requested resource (e.g. 
+organization or bucket) wasn't found |
+| `413 "Request too large"` | cannot read data: points in batch is too large | If a **write** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) |
+| `429 "Too many requests"` | `Retry-After` header: xxx (seconds to wait before retrying the request) | If a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) |
+| `500 "Internal server error"` | | Default status for an error |
+| `503 "Service unavailable"` | Series cardinality exceeds your plan's service quota | If **series cardinality** exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) |
+
+{{% /cloud-only %}}
+
+{{% oss-only %}}
+
+- `204` **Success**: All request data was written to the bucket.
+- `400` **Bad request**: The [line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) data in the request was malformed.
+  The response body contains the first malformed line in the data. All request data was rejected and not written.
+- `401` **Unauthorized**: May indicate one of the following:
+  - [`Authorization: Token` header](/influxdb/v2.5/api-guide/api_intro/#authentication) is missing or malformed.
+  - [API token](/influxdb/v2.5/api-guide/api_intro/#authentication) value is missing from the header.
+  - API token does not have sufficient permissions to write to the organization and the bucket. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2.5/security/tokens/).
+- `404` **Not found**: A requested resource (e.g. an organization or bucket) was not found. The response body contains the requested resource type, e.g. "organization", and resource name.
+- `413` **Request entity too large**: All request data was rejected and not written. InfluxDB OSS only returns this error if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error.
+- `500` **Internal server error**: Default HTTP status for an error.
+- `503` **Service unavailable**: Server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again.
+
+{{% /oss-only %}}
+
+The `message` property of the response body may contain additional details about the error.
+If some of your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
+
+{{% cloud-only %}}
+
+### Troubleshoot partial writes
+
+Because writes are asynchronous, they may fail partially or completely even though InfluxDB returns an HTTP `2xx` status code for a valid request.
+For example, a partial write may occur when InfluxDB writes all points that conform to the bucket schema, but rejects points that have the wrong data type in a field.
+To check for writes that fail asynchronously, create a [task](/influxdb/cloud/process-data/manage-tasks/) to [check the _monitoring bucket for rejected points](#review-rejected-points).
+To resolve partial writes and rejected points, see [troubleshoot failures](#troubleshoot-failures).
+
+{{% /cloud-only %}}
+
+## Troubleshoot failures
+
+{{% oss-only %}}
+
+If you notice data is missing in your bucket, do the following:
+
+- Check the `message` property in the response body for details about the error.
+- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). +- Verify all lines contain valid syntax ([line protocol](/influxdb/v2.5/reference/syntax/line-protocol/) or [CSV](/influxdb/v2.5/reference/syntax/annotated-csv/)). +- Verify the timestamps match the [precision parameter](/influxdb/v2.5/write-data/#timestamp-precision). +- Minimize payload size and network errors by [optimizing writes](/influxdb/v2.5/write-data/best-practices/optimize-writes/). + +{{% /oss-only %}} + +{{% cloud-only %}} +If you notice data is missing in your bucket, do the following: + +- Check the `message` property in the response body for details about the error--for example, `partial write error` indicates [rejected points](#troubleshoot-rejected-points). +- Check for [rejected points](#troubleshoot-rejected-points) in your organization's `_monitoring` bucket. +- Verify all lines contain valid syntax ([line protocol](/influxdb/cloud/reference/syntax/line-protocol/) or [CSV](/influxdb/cloud/reference/syntax/annotated-csv/)). See how to [find parsing errors](#find-parsing-errors). +- Verify the data types match the [series](/influxdb/cloud/reference/key-concepts/data-elements/#series) or [bucket schema](/influxdb/cloud/organizations/buckets/bucket-schema/). See how to resolve [explicit schema rejections](#resolve-explicit-schema-rejections). +- Verify the timestamps match the [precision parameter](/influxdb/cloud/write-data/#timestamp-precision). +- Minimize payload size and network errors by [optimizing writes](/influxdb/cloud/write-data/best-practices/optimize-writes/). +{{% /cloud-only %}} + +## Troubleshoot rejected points + +{{% oss-only %}} + +InfluxDB rejects points for the following reasons: + +- The **batch** contains another point with the same series, but one of the fields has a different value type. +- The **bucket** contains another point with the same series, but one of the fields has a different value type. + +Check for [field type](/influxdb/v2.5/reference/key-concepts/data-elements/#field-value) differences between the missing data point and other points that have the same [series](/influxdb/v2.5/reference/key-concepts/data-elements/#series)--for example, did you attempt to write `string` data to an `int` field? + +{{% /oss-only %}} + +{{% cloud-only %}} + +InfluxDB may have rejected points even if the HTTP request returned "Success". +InfluxDB logs rejected data points and associated errors to your organization's `_monitoring` bucket. + +- [Review rejected points](#review-rejected-points) + - [Find parsing errors](#find-parsing-errors) + - [Find data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) +- [Resolve data type conflicts](#resolve-data-type-conflicts) +- [Resolve explicit schema rejections](#resolve-explicit-schema-rejections) + +### Review rejected points + +To get a log of rejected points, query the [`rejected_points` measurement](/influxdb/cloud/reference/internals/system-buckets/#_monitoring-bucket-schema) in your organization's `_monitoring` bucket. +To more quickly locate `rejected_points`, keep the following in mind: + +- If your line protocol batch contains single lines with multiple [fields](/influxdb/cloud/reference/syntax/line-protocol/#field-set), InfluxDB logs an entry for each point (each unique field) that is rejected. +- Each entry contains a `reason` tag that describes why the point was rejected. 
+- Entries for [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) have a `count` field value of `1`.
+- Entries for [parsing errors](#find-parsing-errors) contain an `error` field (and don't contain a `count` field).
+
+#### rejected_points schema
+
+| Name | Value |
+|:------ |:----- |
+| `_measurement` | `rejected_points` |
+| `_field` | [`count`](#find-data-type-conflicts-and-schema-rejections) or [`error`](#find-parsing-errors) |
+| `_value` | [`1`](#find-data-type-conflicts-and-schema-rejections) or [error details](#find-parsing-errors) |
+| `bucket` | ID of the bucket that rejected the point |
+| `measurement` | Measurement name of the point |
+| `field` | Name of the field that caused the rejection |
+| `reason` | Brief description of the problem. See specific reasons in [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) |
+| `gotType` | Received [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
+| `wantType` | Expected [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
+| `_time` | Time the rejected point was logged |
+
+#### Find parsing errors
+
+If InfluxDB can't parse a line (e.g. due to syntax problems), the response `message` might not provide details.
+To find parsing error details, query `rejected_points` entries that contain the `error` field.
+
+```js
+from(bucket: "_monitoring")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "rejected_points")
+    |> filter(fn: (r) => r._field == "error")
+```
+
+#### Find data type conflicts and schema rejections
+
+To find `rejected_points` caused by [data type conflicts](#resolve-data-type-conflicts) or [schema rejections](#resolve-explicit-schema-rejections),
+query for the `count` field.
+
+```js
+from(bucket: "_monitoring")
+    |> range(start: -1h)
+    |> filter(fn: (r) => r._measurement == "rejected_points")
+    |> filter(fn: (r) => r._field == "count")
+```
+
+### Resolve data type conflicts
+
+When you write to a bucket that has the `implicit` schema type, InfluxDB compares new points to points that have the same [series](/influxdb/cloud/reference/key-concepts/data-elements/#series).
+If a point has a field with a different data type than the series, InfluxDB rejects the point and logs a `rejected_points` entry.
+The `rejected_points` entry contains one of the following reasons:
+
+| Reason | Meaning |
+|:------ |:------- |
+| `type conflict in batch write` | The **batch** contains another point with the same series, but one of the fields has a different value type. |
+| `type conflict with existing data` | The **bucket** contains another point with the same series, but one of the fields has a different value type. |
+
+### Resolve explicit schema rejections
+
+If you write to a bucket with an
+[explicit schema](/influxdb/cloud/organizations/buckets/bucket-schema/),
+the data must conform to the schema. Otherwise, InfluxDB rejects the data.
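+
+Before interpreting a rejection, it can help to review the measurement schemas the bucket enforces so you know the expected column names and data types.
+The following is a minimal sketch using the `influx bucket-schema list` command; `example-bucket` is a placeholder bucket name, and the extended-output flag may vary by influx CLI version.
+
+```sh
+# Sketch: list the explicit measurement schemas enforced on a bucket.
+# "example-bucket" is a placeholder bucket name.
+# -x/--extended-output (if available in your CLI version) also prints
+# each schema's columns so you can compare them to the rejected point.
+influx bucket-schema list \
+  --bucket example-bucket \
+  --extended-output
+```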
+ +Do the following to interpret explicit schema rejections: + +- [Detect a measurement mismatch](#detect-a-measurement-mismatch) +- [Detect a field type mismatch](#detect-a-field-type-mismatch) + +#### Detect a measurement mismatch + +InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) doesn't match the **name** of a [bucket schema](/influxdb/cloud/organizations/buckets/bucket-schema/). +The `rejected_points` entry contains the following `reason` tag value: + +| Reason | Meaning | +|:------ |:------- +| `measurement not allowed by schema` | The **bucket** is configured to use explicit schemas and none of the schemas matches the **measurement** of the point. | + +Consider the following [line protocol](/influxdb/cloud/reference/syntax/line-protocol) data. + +``` +airSensors,sensorId=TLM0201 temperature=73.97,humidity=35.23,co=0.48 1637014074 +``` + +The line has an `airSensors` measurement and three fields (`temperature`, `humidity`, and `co`). +If you try to write this data to a bucket that has the [`explicit` schema type](/influxdb/cloud/organizations/buckets/bucket-schema/) and doesn't have an `airSensors` schema, the `/api/v2/write` InfluxDB API returns an error and the following data: + +```json +{ + "code": "invalid", + "message": "3 out of 3 points rejected (check rejected_points in your _monitoring bucket for further information)" +} +``` + +InfluxDB logs three `rejected_points` entries, one for each field. + +| _measurement | _field | _value | field | measurement | reason | +|:----------------|:-------|:-------|:------------|:------------|:----------------------------------| +| rejected_points | count | 1 | humidity | airSensors | measurement not allowed by schema | +| rejected_points | count | 1 | co | airSensors | measurement not allowed by schema | +| rejected_points | count | 1 | temperature | airSensors | measurement not allowed by schema | + +#### Detect a field type mismatch + +InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) matches the **name** of a bucket schema and the field data types don't match. +The `rejected_points` entry contains the following reason: + +| Reason | Meaning | +|:------------------------------------|:-----------------------------------------------------------------------------------------------------| +| `field type mismatch with schema` | The point has the same measurement as a configured schema and they have different field value types. | + +Consider a bucket that has the following `airSensors` [`explicit bucket schema`](/influxdb/cloud/organizations/buckets/bucket-schema/): + +```json +{ + "name": "airSensors", + "columns": [ + { + "name": "time", + "type": "timestamp" + }, + { + "name": "sensorId", + "type": "tag" + }, + { + "name": "temperature", + "type": "field", + "dataType": "float" + }, + { + "name": "humidity", + "type": "field", + "dataType": "float" + }, + { + "name": "co", + "type": "field", + "dataType": "float" + } + ] +} +``` + +The following [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) data has an `airSensors` measurement, a `sensorId` tag, and three fields (`temperature`, `humidity`, and `co`). + +``` +airSensors,sensorId=L1 temperature=90.5,humidity=70.0,co=0.2 1637014074 +airSensors,sensorId=L1 temperature="90.5",humidity=70.0,co=0.2 1637014074 +``` + +In the example data above, the second point has a `temperature` field value with the _string_ data type. 
+Because the `airSensors` schema requires `temperature` to have the _float_ data type, +InfluxDB returns a `400` error and a message that describes the result: + +```json +{ + "code": "invalid", + "message": "partial write error (5 accepted): 1 out of 6 points rejected (check rejected_points in your _monitoring bucket for further information)" +} +``` + +InfluxDB logs the following `rejected_points` entry to the `_monitoring` bucket: + +| _measurement | _field | _value | bucket | field | gotType | measurement | reason | wantType | +|:------------------|:-------|:-------|:-------------------|:--------------|:---------|:------------|:----------------------------------|:---------| +| rejected_points | count | 1 | a7d5558b880a93da | temperature | String | airSensors | field type mismatch with schema | Float | + +{{% /cloud-only %}} \ No newline at end of file diff --git a/data/products.yml b/data/products.yml index 40e7b2486..7bc3aa37d 100644 --- a/data/products.yml +++ b/data/products.yml @@ -3,10 +3,11 @@ influxdb: altname: InfluxDB OSS namespace: influxdb list_order: 2 - versions: [v1.3, v1.4, v1.5, v1.6, v1.7, v1.8, v2.0, v2.1, v2.2, v2.3, v2.4] - latest: v2.4 - latest_override: v2.4 + versions: [v1.3, v1.4, v1.5, v1.6, v1.7, v1.8, v2.0, v2.1, v2.2, v2.3, v2.4, v2.5] + latest: v2.5 + latest_override: v2.5 latest_patches: + "2.5": 0 "2.4": 0 "2.3": 0 "2.2": 0 @@ -19,6 +20,7 @@ influxdb: "1.4": 3 "1.3": 9 latest_cli: + "2.5": 2.5.0 "2.4": 2.4.0 "2.3": 2.3.0 "2.2": 2.2.1 diff --git a/deploy/edge.js b/deploy/edge.js index febc06442..204eb345a 100644 --- a/deploy/edge.js +++ b/deploy/edge.js @@ -3,8 +3,8 @@ const path = require('path'); const latestVersions = { - 'influxdb': 'v2.4', - 'influxdbv2': 'v2.4', + 'influxdb': 'v2.5', + 'influxdbv2': 'v2.5', 'telegraf': 'v1.23', 'chronograf': 'v1.10', 'kapacitor': 'v1.6',