parent
62b0f31d2f
commit
c91442f88c
|
@ -477,22 +477,6 @@ flowchart TB
|
|||
{{< /diagram >}}
|
||||
```
|
||||
|
||||
### File system diagrams
|
||||
Use the `{{< filesystem-diagram >}}` shortcode to create a styled file system
|
||||
diagram using a Markdown unordered list.
|
||||
|
||||
##### Example filestsytem diagram shortcode
|
||||
```md
|
||||
{{< filesystem-diagram >}}
|
||||
- Dir1/
|
||||
- Dir2/
|
||||
- ChildDir/
|
||||
- Child
|
||||
- Child
|
||||
- Dir3/
|
||||
{{< /filesystem-diagram >}}
|
||||
```
|
||||
|
||||
### High-resolution images
|
||||
In many cases, screenshots included in the docs are taken from high-resolution (retina) screens.
|
||||
Because of this, the actual pixel dimension is 2x larger than it needs to be and is rendered 2x bigger than it should be.
|
||||
|
@ -556,17 +540,6 @@ Markdown content associated with label 2.
|
|||
{{< /expand-wrapper >}}
|
||||
```
|
||||
|
||||
### Captions
|
||||
Use the `{{% caption %}}` shortcode to add captions to images and code blocks.
|
||||
Captions are styled with a smaller font size, italic text, slight transparency,
|
||||
and appear directly under the previous image or code block.
|
||||
|
||||
```md
|
||||
{{% caption %}}
|
||||
Markdown content for the caption.
|
||||
{{% /caption %}}
|
||||
```
|
||||
|
||||
### Generate a list of children articles
|
||||
Section landing pages often contain just a list of articles with links and descriptions for each.
|
||||
This can be cumbersome to maintain as content is added.
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,4 +1,3 @@
|
|||
# this is a manually maintained file for these old routes until oats#15 is resolved
|
||||
openapi: "3.0.0"
|
||||
info:
|
||||
title: Influx API Service (V1 compatible endpoints)
|
||||
|
@ -119,7 +118,7 @@ paths:
|
|||
name: Accept
|
||||
schema:
|
||||
type: string
|
||||
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
|
||||
description: Specifies how query results should be encoded in the response.
|
||||
default: application/json
|
||||
enum:
|
||||
- application/json
|
||||
|
@ -254,6 +253,7 @@ components:
|
|||
test_measurement,,1603740794286107366,1,tag_value
|
||||
test_measurement,,1603740870053205649,2,tag_value
|
||||
test_measurement,,1603741221085428881,3,tag_value
|
||||
|
||||
Error:
|
||||
properties:
|
||||
code:
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
# This script provides a simple way grab the latest fully resolved swagger files
|
||||
# from the influxdata/openapi repo.
|
||||
#
|
||||
# Specify a context to retrieve (cloud, oss, v1compat, all).
|
||||
# Optionally specify an OSS version to write the updated swagger to.
|
||||
# The default version is the latest OSS version directory in the api-docs directory
|
||||
#
|
||||
# Syntax:
|
||||
# sh ./getswagger.sh <context> <version>
|
||||
#
|
||||
# Examples:
|
||||
# sh ./getswagger.sh cloud
|
||||
# sh .getswagger.sh oss v2.0
|
||||
|
||||
versionDirs=($(ls -d */))
|
||||
latestOSS=${versionDirs[${#versionDirs[@]}-1]}
|
||||
|
||||
context=$1
|
||||
version=${2-${latestOSS%/}}
|
||||
|
||||
function updateCloud {
|
||||
echo "Updating Cloud swagger..."
|
||||
curl https://raw.githubusercontent.com/influxdata/openapi/master/contracts/cloud.yml -s -o cloud/swagger.yml
|
||||
}
|
||||
|
||||
function updateOSS {
|
||||
echo "Updating OSS ${version} swagger..."
|
||||
curl https://raw.githubusercontent.com/influxdata/openapi/master/contracts/oss.yml -s -o ${version}/swagger.yml
|
||||
}
|
||||
|
||||
function updateV1Compat {
|
||||
echo "Updating Cloud and ${version} v1 compatibilty swagger..."
|
||||
curl https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml -s -o cloud/swaggerV1Compat.yml
|
||||
cp cloud/swaggerV1Compat.yml ${version}/swaggerV1Compat.yml
|
||||
}
|
||||
|
||||
if [ "$context" = "cloud" ];
|
||||
then
|
||||
updateCloud
|
||||
elif [ "$context" = "oss" ];
|
||||
then
|
||||
updateOSS
|
||||
elif [ "$context" = "v1compat" ];
|
||||
then
|
||||
updateV1Compat
|
||||
elif [ "$context" = "all" ];
|
||||
then
|
||||
updateCloud
|
||||
updateOSS
|
||||
updateV1Compat
|
||||
else
|
||||
echo "Provide a context (cloud, oss, v1compat, all)"
|
||||
fi
|
File diff suppressed because it is too large
Load Diff
|
@ -1,4 +1,3 @@
|
|||
# this is a manually maintained file for these old routes until oats#15 is resolved
|
||||
openapi: "3.0.0"
|
||||
info:
|
||||
title: Influx API Service (V1 compatible endpoints)
|
||||
|
@ -119,7 +118,7 @@ paths:
|
|||
name: Accept
|
||||
schema:
|
||||
type: string
|
||||
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
|
||||
description: Specifies how query results should be encoded in the response. **Note:** When using `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
|
||||
default: application/json
|
||||
enum:
|
||||
- application/json
|
||||
|
@ -254,6 +253,7 @@ components:
|
|||
test_measurement,,1603740794286107366,1,tag_value
|
||||
test_measurement,,1603740870053205649,2,tag_value
|
||||
test_measurement,,1603741221085428881,3,tag_value
|
||||
|
||||
Error:
|
||||
properties:
|
||||
code:
|
||||
|
|
|
@ -103,7 +103,6 @@
|
|||
|
||||
@import "article/blocks",
|
||||
"article/buttons",
|
||||
"article/captions",
|
||||
"article/children",
|
||||
"article/code",
|
||||
"article/cloud",
|
||||
|
@ -112,7 +111,6 @@
|
|||
"article/expand",
|
||||
"article/feedback",
|
||||
"article/flex",
|
||||
"article/html-diagrams",
|
||||
"article/keybinding",
|
||||
"article/lists",
|
||||
"article/note",
|
||||
|
|
|
@ -15,14 +15,12 @@ blockquote,
|
|||
}
|
||||
|
||||
blockquote {
|
||||
padding: 1rem 1rem 1rem 1.25rem;
|
||||
border-color: rgba($article-text, .25);
|
||||
p, li {
|
||||
font-size: 1.15rem;
|
||||
font-style: italic;
|
||||
color: rgba($article-text, .5);
|
||||
}
|
||||
*:last-child {margin-bottom: 0;}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
.caption {
|
||||
margin: -2rem 0 2rem;
|
||||
padding-left: .25rem;
|
||||
font-size: .8rem;
|
||||
font-style: italic;
|
||||
opacity: .8;
|
||||
|
||||
p { line-height: 1.25rem; }
|
||||
}
|
||||
|
||||
.code-tabs-wrapper, .code-tab-content {
|
||||
& + .caption {
|
||||
margin-top: -2.75rem;
|
||||
}
|
||||
}
|
|
@ -1,129 +0,0 @@
|
|||
///////////////////////////// File System Diagrams /////////////////////////////
|
||||
|
||||
.fs-diagram {
|
||||
display: inline-block;
|
||||
margin: 1rem 0 2rem;
|
||||
padding: 1.5rem 2.5rem 1.5rem 1.5rem;
|
||||
font-family: $roboto-mono;
|
||||
border-radius: $radius;
|
||||
box-shadow: 1px 3px 10px $article-shadow;
|
||||
& > ul { padding: 0; margin: 0;
|
||||
li { line-height: 2rem; color: $article-code; }
|
||||
ul { padding-left: 2rem;
|
||||
li {
|
||||
position: relative;
|
||||
margin: 0;
|
||||
line-height: 2rem;
|
||||
border-left: 1px solid $article-code;
|
||||
&:before {
|
||||
content: "";
|
||||
display: inline-block;
|
||||
width: 1rem;
|
||||
height: .25rem;
|
||||
margin-right: .55rem;
|
||||
border-top: 1px solid $article-code;
|
||||
}
|
||||
&:last-child {
|
||||
border: none;
|
||||
&:after {
|
||||
content: "";
|
||||
position: absolute;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
left: 0;
|
||||
top: 0;
|
||||
height: 1.1rem;
|
||||
border-left: 1px solid $article-code;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ul { list-style: none; }
|
||||
}
|
||||
|
||||
///////////////////////////////// Shard diagram ////////////////////////////////
|
||||
#shard-diagram {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
max-width: 550px;
|
||||
margin: 2.5rem auto 3rem;
|
||||
|
||||
p {margin-bottom: 0; line-height: 1.25em;}
|
||||
|
||||
.periods {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
justify-content: space-between;
|
||||
}
|
||||
.timeline {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
margin-top: .25rem;
|
||||
padding: 0 .5rem;
|
||||
|
||||
.interval {
|
||||
border-top: 1px solid $article-text;
|
||||
border-right: 1px solid $article-text;
|
||||
height: .75rem;
|
||||
flex-grow: 1;
|
||||
&:first-child {
|
||||
border-left: 1px solid $article-text;
|
||||
}
|
||||
}
|
||||
}
|
||||
.shard-groups {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
justify-content: space-around;
|
||||
margin-top: .25rem;
|
||||
padding: .5rem;
|
||||
line-height: 1rem;
|
||||
|
||||
.shard-group {
|
||||
margin: 0 .25rem;
|
||||
text-align: center;
|
||||
padding: .5rem;
|
||||
border-radius: .5rem;
|
||||
background: $html-diagram-shard-group-bg;
|
||||
flex-grow: 1;
|
||||
box-shadow: 2px 2px 8px $article-shadow;
|
||||
p:first-child {margin-bottom: .75rem;}
|
||||
|
||||
.shard {
|
||||
display: block;
|
||||
margin-top: .5rem;
|
||||
padding: .65rem 1rem;
|
||||
color: #fff;
|
||||
border-radius: .25rem;
|
||||
@include gradient($article-table-header, 90deg)
|
||||
background-attachment: fixed;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////// MEDIA QUERIES ////////////////////////////////
|
||||
|
||||
@include media(small) {
|
||||
#shard-diagram {
|
||||
flex-direction: row;
|
||||
.periods {flex-direction: column; margin-right: .5rem; }
|
||||
.timeline {
|
||||
flex-direction: column;
|
||||
padding: .5rem 0;
|
||||
.interval {
|
||||
width: .75rem;
|
||||
border-top: none;
|
||||
border-right: none;
|
||||
border-left: 1px solid $article-text;
|
||||
border-bottom: 1px solid $article-text;
|
||||
&:first-child{ border-top: 1px solid $article-text; }
|
||||
}
|
||||
}
|
||||
.shard-groups {
|
||||
flex-direction: column;
|
||||
.shard-group { margin: .25rem 0;}
|
||||
}
|
||||
}
|
||||
};
|
|
@ -90,7 +90,7 @@
|
|||
margin: .75rem 0 3rem;
|
||||
width: 100%;
|
||||
|
||||
& > :not(table, .fs-diagram) {
|
||||
& > * {
|
||||
width: 100%;
|
||||
margin-left: 0;
|
||||
}
|
||||
|
|
|
@ -209,9 +209,6 @@ $svg-geo-s2-cell: $b-dodger;
|
|||
$svg-geo-region: $p-comet;
|
||||
$svg-geo-point: $br-chartreuse;
|
||||
|
||||
// HTML diagram colors
|
||||
$html-diagram-shard-group-bg: $article-table-row-alt;
|
||||
|
||||
// Diagram colors
|
||||
$diagram-arrow: $g6-smoke;
|
||||
|
||||
|
|
|
@ -209,8 +209,6 @@ $svg-geo-s2-cell: $b-hydrogen !default;
|
|||
$svg-geo-region: $br-galaxy !default;
|
||||
$svg-geo-point: $p-potassium !default;
|
||||
|
||||
$html-diagram-shard-group-bg: $g20-white !default;
|
||||
|
||||
// Diagram colors
|
||||
$diagram-arrow: $g14-chromium !default;
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ This is a paragraph. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nu
|
|||
This is a paragraph. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc rutrum, metus id scelerisque euismod, erat ante suscipit nibh, ac congue enim risus id est. Etiam tristique nisi et tristique auctor. Morbi eu bibendum erat. Sed ullamcorper, dui id lobortis efficitur, mauris odio pharetra neque, vel tempor odio dolor blandit justo.
|
||||
|
||||
#### Here's a title for this codeblock
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs-wrapper %}}
|
||||
{{% code-tabs %}}
|
||||
[Flux](#)
|
||||
[InfluxQL](#)
|
||||
|
@ -115,7 +115,7 @@ FROM "telegraf"."autogen"."mem"
|
|||
WHERE time > now() - 15m
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{% /code-tabs-wrapper %}}
|
||||
|
||||
{{% enterprise %}}
|
||||
### h3 This is a header3
|
||||
|
|
|
@ -98,7 +98,7 @@ To use the `influx` CLI to manage and interact with your InfluxDB Cloud instance
|
|||
|
||||
Click the following button to download and install `influx` CLI for macOS.
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.0.5-darwin-amd64.tar.gz" download>influx CLI (macOS)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.0.4-darwin-amd64.tar.gz" download>influx CLI (macOS)</a>
|
||||
|
||||
#### Step 2: Unpackage the influx binary
|
||||
|
||||
|
@ -110,7 +110,7 @@ or run the following command in a macOS command prompt application such
|
|||
|
||||
```sh
|
||||
# Unpackage contents to the current working directory
|
||||
tar zxvf ~/Downloads/influxdb2-client-2.0.5-darwin-amd64.tar.gz
|
||||
tar zxvf ~/Downloads/influxdb2-client-2.0.4-darwin-amd64.tar.gz
|
||||
```
|
||||
|
||||
#### Step 3: (Optional) Place the binary in your $PATH
|
||||
|
@ -122,7 +122,7 @@ prefix the executable with `./` to run in place. If the binary is on your $PATH,
|
|||
|
||||
```sh
|
||||
# Copy the influx binary to your $PATH
|
||||
sudo cp influxdb2-client-2.0.5-darwin-amd64/influx /usr/local/bin/
|
||||
sudo cp influxdb2-client-2.0.4-darwin-amd64/influx /usr/local/bin/
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
|
@ -166,8 +166,8 @@ To see all available `influx` commands, type `influx -h` or check out [influx -
|
|||
|
||||
Click one of the following buttons to download and install the `influx` CLI appropriate for your chipset.
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.0.5-linux-amd64.tar.gz" download >influx CLI (amd64)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.0.5-linux-arm64.tar.gz" download >influx CLI (arm)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.0.4-linux-amd64.tar.gz" download >influx CLI (amd64)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.0.4-linux-arm64.tar.gz" download >influx CLI (arm)</a>
|
||||
|
||||
#### Step 2: Unpackage the influx binary
|
||||
|
||||
|
@ -175,7 +175,7 @@ Click one of the following buttons to download and install the `influx` CLI appr
|
|||
|
||||
```sh
|
||||
# Unpackage contents to the current working directory
|
||||
tar xvfz influxdb-client-2.0.5-linux-amd64.tar.gz
|
||||
tar xvfz influxdb-client-2.0.4-linux-amd64.tar.gz
|
||||
```
|
||||
|
||||
#### Step 3: (Optional) Place the binary in your $PATH
|
||||
|
@ -187,7 +187,7 @@ prefix the executable with `./` to run in place. If the binary is on your $PATH,
|
|||
|
||||
```sh
|
||||
# Copy the influx and influxd binary to your $PATH
|
||||
sudo cp influxdb-client-2.0.5-linux-amd64/influx /usr/local/bin/
|
||||
sudo cp influxdb-client-2.0.4-linux-amd64/influx /usr/local/bin/
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
|
|
|
@ -55,7 +55,7 @@ To view a summary of what's included in a template before applying the template,
|
|||
use the [`influx template` command](/influxdb/cloud/reference/cli/influx/template/).
|
||||
View a summary of a template stored in your local filesystem or from a URL.
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs-wrapper %}}
|
||||
{{% code-tabs %}}
|
||||
[From a file](#)
|
||||
[From a URL](#)
|
||||
|
@ -78,14 +78,14 @@ influx template -u <template-url>
|
|||
influx template -u https://raw.githubusercontent.com/influxdata/community-templates/master/linux_system/linux_system.yml
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{% /code-tabs-wrapper %}}
|
||||
|
||||
## Validate a template
|
||||
To validate a template before you install it or troubleshoot a template, use
|
||||
the [`influx template validate` command](/influxdb/cloud/reference/cli/influx/template/validate/).
|
||||
Validate a template stored in your local filesystem or from a URL.
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs-wrapper %}}
|
||||
{{% code-tabs %}}
|
||||
[From a file](#)
|
||||
[From a URL](#)
|
||||
|
@ -108,7 +108,7 @@ influx template validate -u <template-url>
|
|||
influx template validate -u https://raw.githubusercontent.com/influxdata/community-templates/master/linux_system/linux_system.yml
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{% /code-tabs-wrapper %}}
|
||||
|
||||
## Apply templates
|
||||
Use the [`influx apply` command](/influxdb/cloud/reference/cli/influx/apply/) to install templates
|
||||
|
|
|
@ -40,10 +40,6 @@ To send notifications about changes in your data, start by creating a notificati
|
|||
|
||||
- For PagerDuty:
|
||||
- [Create a new service](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-new-service), [add an integration for your service](https://support.pagerduty.com/docs/services-and-integrations#section-add-integrations-to-an-existing-service), and then enter the PagerDuty integration key for your new service in the **Routing Key** field.
|
||||
- The **Client URL** provides a useful link in your PagerDuty notification. Enter any URL that you'd like to use to investigate issues. This URL is sent as the `client_url` property in the PagerDuty trigger event. By default, the **Client URL** is set to your Monitoring & Alerting History page, and the following included in the PagerDuty trigger event:
|
||||
|
||||
```json
|
||||
"client_url": "https://cloud2.influxdata.com/orgs/<your-org-ID>/alert-history"
|
||||
```
|
||||
- The **Client URL** provides a useful link in your PagerDuty notification. Enter any URL that you'd like to use to investigate issues. This URL is sent as the `client_url` property in the PagerDuty trigger event. By default, the **Client URL** is set to your Monitoring & Alerting History page, and the following included in the PagerDuty trigger event: `"client_url": "https://us-west-2-1.aws.cloud2.influxdata.net/orgs/<your-org-ID>/alert-history”`
|
||||
|
||||
6. Click **Create Notification Endpoint**.
|
||||
|
|
|
@ -11,8 +11,7 @@ influxdb/cloud/tags: [buckets]
|
|||
---
|
||||
|
||||
A **bucket** is a named location where time series data is stored.
|
||||
All buckets have a **retention period**, a duration of time that each data point persists.
|
||||
InfluxDB drops all points with timestamps older than the bucket's retention period.
|
||||
All buckets have a **retention policy**, a duration of time that each data point persists.
|
||||
A bucket belongs to an organization.
|
||||
|
||||
The following articles provide information about managing buckets:
|
||||
|
|
|
@ -26,7 +26,7 @@ There are two places you can create a bucket in the UI.
|
|||
3. Enter a **Name** for the bucket.
|
||||
4. Select when to **Delete Data**:
|
||||
- **Never** to retain data forever.
|
||||
- **Older than** to choose a specific retention period.
|
||||
- **Older than** to choose a specific retention policy.
|
||||
5. Click **Create** to create the bucket.
|
||||
|
||||
### Create a bucket in the Data Explorer
|
||||
|
@ -39,7 +39,7 @@ There are two places you can create a bucket in the UI.
|
|||
3. Enter a **Name** for the bucket.
|
||||
4. Select when to **Delete Data**:
|
||||
- **Never** to retain data forever.
|
||||
- **Older than** to choose a specific retention period.
|
||||
- **Older than** to choose a specific retention policy.
|
||||
5. Click **Create** to create the bucket.
|
||||
|
||||
## Create a bucket using the influx CLI
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: Update a bucket
|
||||
seotitle: Update a bucket in InfluxDB
|
||||
description: Update a bucket's name or retention period in InfluxDB using the InfluxDB UI or the influx CLI.
|
||||
description: Update a bucket's name or retention policy in InfluxDB using the InfluxDB UI or the influx CLI.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Update a bucket
|
||||
|
@ -32,20 +32,20 @@ If you change a bucket name, be sure to update the bucket in the above places as
|
|||
3. Review the information in the window that appears and click **I understand, let's rename my bucket**.
|
||||
4. Update the bucket's name and click **Change Bucket Name**.
|
||||
|
||||
## Update a bucket's retention period in the InfluxDB UI
|
||||
## Update a bucket's retention policy in the InfluxDB UI
|
||||
|
||||
1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**.
|
||||
|
||||
{{< nav-icon "data" >}}
|
||||
|
||||
|
||||
2. Click **Settings** next to the bucket you want to update.
|
||||
3. In the window that appears, under **Delete data**, select a retention period:
|
||||
|
||||
- **Never**: data in the bucket is retained indefinitely.
|
||||
- **Older Than**: select a predefined retention period from the dropdown menu.
|
||||
|
||||
|
||||
{{% note %}}
|
||||
Use the [`influx bucket update` command](#update-a-buckets-retention-period) to set a custom retention period.
|
||||
Use the [`influx bucket update` command](#update-a-buckets-retention-policy) to set a custom retention policy.
|
||||
{{% /note %}}
|
||||
5. Click **Save Changes**.
|
||||
|
||||
|
@ -67,9 +67,9 @@ influx bucket update -i <bucket-id> -o <org-name> -n <new-bucket-name>
|
|||
influx bucket update -i 034ad714fdd6f000 -o my-org -n my-new-bucket
|
||||
```
|
||||
|
||||
##### Update a bucket's retention period
|
||||
##### Update a bucket's retention policy
|
||||
|
||||
Valid retention period duration units are nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), or weeks (`w`).
|
||||
Valid retention policy duration units are nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), or weeks (`w`).
|
||||
|
||||
```sh
|
||||
# Syntax
|
||||
|
|
|
@ -74,7 +74,7 @@ Once your task is ready, see [Create a task](/influxdb/cloud/process-data/manage
|
|||
## Things to consider
|
||||
- If there is a chance that data may arrive late, specify an `offset` in your
|
||||
task options long enough to account for late-data.
|
||||
- If running a task against a bucket with a finite retention period, do not schedule
|
||||
tasks to run too closely to the end of the retention period.
|
||||
- If running a task against a bucket with a finite retention policy, do not schedule
|
||||
tasks to run too closely to the end of the retention policy.
|
||||
Always provide a "cushion" for downsampling tasks to complete before the data
|
||||
is dropped by the retention period.
|
||||
is dropped by the retention policy.
|
||||
|
|
|
@ -13,4 +13,32 @@ related:
|
|||
- /influxdb/cloud/reference/cli/influx/task/retry
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
InfluxDB data processing tasks generally run in defined intervals or at a specific time,
|
||||
however, you can manually run a task from the InfluxDB user interface (UI) or the
|
||||
`influx` command line interface (CLI).
|
||||
|
||||
## Run a task from the InfluxDB UI
|
||||
1. In the navigation menu on the left, select **Tasks**.
|
||||
|
||||
{{< nav-icon "tasks" >}}
|
||||
|
||||
2. Hover over the task you want to run and click the **{{< icon "gear" >}}** icon.
|
||||
3. Select **Run Task**.
|
||||
|
||||
## Run a task with the influx CLI
|
||||
Use the `influx task run retry` command to run a task.
|
||||
|
||||
{{% note %}}
|
||||
To run a task from the `influx` CLI, the task must have already run at least once.
|
||||
{{% /note %}}
|
||||
|
||||
```sh
|
||||
# List all tasks to find the ID of the task to run
|
||||
influx task list
|
||||
|
||||
# Use the task ID to list previous runs of the task
|
||||
influx task run list --task-id=0000000000000000
|
||||
|
||||
# Use the task ID and run ID to retry a run
|
||||
influx task run retry --task-id=0000000000000000 --run-id=0000000000000000
|
||||
```
|
||||
|
|
|
@ -11,4 +11,36 @@ related:
|
|||
- /influxdb/cloud/reference/cli/influx/task/run/find
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
When an InfluxDB task runs, a "run" record is created in the task's history.
|
||||
Logs associated with each run provide relevant log messages, timestamps,
|
||||
and the exit status of the run attempt.
|
||||
|
||||
Use the InfluxDB user interface (UI) or the `influx` command line interface (CLI)
|
||||
to view task run histories and associated logs.
|
||||
|
||||
## View a task's run history in the InfluxDB UI
|
||||
|
||||
1. In the navigation menu on the left, select **Tasks**.
|
||||
|
||||
{{< nav-icon "tasks" >}}
|
||||
|
||||
2. Hover over the task you want to run and click the **{{< icon "gear" >}}** icon.
|
||||
3. Select **View Task Runs**.
|
||||
|
||||
### View task run logs
|
||||
To view logs associated with a run, click **View Logs** next to the run in the task's run history.
|
||||
|
||||
## View a task's run history with the influx CLI
|
||||
Use the `influx task run list` command to view a task's run history.
|
||||
|
||||
```sh
|
||||
# List all tasks to find the ID of the task to run
|
||||
influx task list
|
||||
|
||||
# Use the task ID to view the run history of a task
|
||||
influx task run list --task-id=0000000000000000
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
Detailed run logs are not currently available in the `influx` CLI.
|
||||
{{% /note %}}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Create custom Flux functions
|
||||
description: Create your own custom Flux functions to transform and operate on data.
|
||||
description: Create your own custom Flux functions to transform and manipulate data.
|
||||
list_title: Custom functions
|
||||
influxdb/cloud/tags: [functions, custom, flux]
|
||||
menu:
|
||||
|
|
|
@ -10,4 +10,69 @@ menu:
|
|||
influxdb/cloud/tags: [query]
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
Optimize your Flux queries to reduce their memory and compute (CPU) requirements.
|
||||
|
||||
- [Start queries with pushdown functions](#start-queries-with-pushdown-functions)
|
||||
- [Avoid short window durations](#avoid-short-window-durations)
|
||||
- [Use "heavy" functions sparingly](#use-heavy-functions-sparingly)
|
||||
- [Balance time range and data precision](#balance-time-range-and-data-precision)
|
||||
|
||||
## Start queries with pushdown functions
|
||||
Some Flux functions can push their data manipulation down to the underlying
|
||||
data source rather than storing and manipulating data in memory.
|
||||
These are known as "pushdown" functions and using them correctly can greatly
|
||||
reduce the amount of memory necessary to run a query.
|
||||
|
||||
#### Pushdown functions
|
||||
- [range()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/range/)
|
||||
- [filter()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/filter/)
|
||||
- [group()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/group/)
|
||||
- [count()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/aggregates/count/)
|
||||
- [sum()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/aggregates/sum/)
|
||||
- [first()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/selectors/first/)
|
||||
- [last()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/selectors/last/)
|
||||
|
||||
Use pushdown functions at the beginning of your query.
|
||||
Once a non-pushdown function runs, Flux pulls data into memory and runs all
|
||||
subsequent operations there.
|
||||
|
||||
##### Pushdown functions in use
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: -1h) //
|
||||
|> filter(fn: (r) => r.sensor == "abc123") // Pushed to the data source
|
||||
|> group(columns: ["_field", "host"]) //
|
||||
|
||||
|> aggregateWindow(every: 5m, fn: max) //
|
||||
|> filter(fn: (r) => r._value >= 90.0) // Run in memory
|
||||
|> top(n: 10) //
|
||||
```
|
||||
|
||||
## Avoid short window durations
|
||||
Windowing (grouping data based on time intervals) is commonly used to aggregate and downsample data.
|
||||
Increase performance by avoiding short window durations.
|
||||
More windows require more compute power to evaluate which window each row should be assigned to.
|
||||
Reasonable window durations depend on the total time range queried.
|
||||
|
||||
## Use "heavy" functions sparingly
|
||||
The following functions use more memory or CPU than others.
|
||||
Consider their necessity in your data processing before using them:
|
||||
|
||||
- [map()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/map/)
|
||||
- [reduce()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/aggregates/reduce/)
|
||||
- [window()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/window/)
|
||||
- [join()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/join/)
|
||||
- [union()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/union/)
|
||||
- [pivot()](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/pivot/)
|
||||
|
||||
{{% note %}}
|
||||
We're continually optimizing Flux and this list may not represent its current state.
|
||||
{{% /note %}}
|
||||
|
||||
## Balance time range and data precision
|
||||
To ensure queries are performant, balance the time range and the precision of your data.
|
||||
For example, if you query data stored every second and request six months worth of data,
|
||||
results would include ≈15.5 million points per series. Depending on the number of series returned after `filter()`([cardinality](/influxdb/cloud/reference/glossary/#series-cardinality)), this can quickly become many billions of points.
|
||||
Flux must store these points in memory to generate a response. Use [pushdown functions](#pushdown-functions) to optimize how many points are stored in memory.
|
||||
|
||||
To query data over large periods of time, create a task to [downsample data](/influxdb/cloud/process-data/common-tasks/downsample-data/), and then query the downsampled data instead.
|
||||
|
|
|
@ -14,57 +14,4 @@ related:
|
|||
- /influxdb/cloud/query-data/influxql
|
||||
---
|
||||
|
||||
The InfluxDB v2 API includes InfluxDB 1.x compatibility `/write` and `/query`
|
||||
endpoints that work with InfluxDB 1.x client libraries and third-party integrations
|
||||
like [Grafana](https://grafana.com) and others.
|
||||
|
||||
<a class="btn" href="/influxdb/cloud/api/v1-compatibility/">View full v1 compatibility API documentation</a>
|
||||
|
||||
## Authentication
|
||||
InfluxDB Cloud all query and write requests to be authenticated using
|
||||
[InfluxDB authentication tokens](/influxdb/cloud/security/tokens/).
|
||||
Use the following authenication methods:
|
||||
|
||||
- [Token authentication](#token-authentication)
|
||||
- [Basic authentication](#basic-authentication)
|
||||
|
||||
### Token authentication
|
||||
Token authentication requires the following credential:
|
||||
|
||||
- **token**: InfluxDB [authentication token](/influxdb/cloud/security/tokens/)
|
||||
|
||||
Use the `Authorization` header with the `Token` scheme to provide your
|
||||
authentication token to InfluxDB.
|
||||
|
||||
##### Token authentication with authorization header
|
||||
```sh
|
||||
# Header syntax
|
||||
Authorization: Token <token>
|
||||
|
||||
# Header example
|
||||
Authorization: Token mYSuP3rs3cREtT0k3N
|
||||
```
|
||||
|
||||
### Basic authentication
|
||||
Basic authentication requires the following credentials:
|
||||
|
||||
- **username**: InfluxDB Cloud username
|
||||
- **password**: InfluxDB Cloud [authentication token](/influxdb/cloud/security/tokens/)
|
||||
|
||||
```sh
|
||||
# --user syntax
|
||||
<username>:<password>
|
||||
```
|
||||
|
||||
## InfluxQL support
|
||||
|
||||
The compatibility API supports InfluxQL, with the following caveats:
|
||||
|
||||
- The `INTO` clause (e.g. `SELECT ... INTO ...`) is not supported.
|
||||
- With the exception of [`DELETE`](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-series-with-delete) and
|
||||
[`DROP MEASUREMENT`](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-measurements-with-drop-measurement) queries, which are still allowed,
|
||||
InfluxQL database management commands are not supported.
|
||||
|
||||
## Compatibility endpoints
|
||||
|
||||
{{< children readmore=true >}}
|
||||
{{< duplicate-oss >}}
|
|
@ -1,11 +0,0 @@
|
|||
---
|
||||
title: influx task retry-failed
|
||||
description: The `influx task retry-failed` command retries failed InfluxDB task runs.
|
||||
menu:
|
||||
influxdb_cloud_ref:
|
||||
name: influx task retry-failed
|
||||
parent: influx task
|
||||
weight: 201
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
|
@ -2,6 +2,12 @@
|
|||
title: influx transpile
|
||||
description: >
|
||||
The `influx transpile` command transpiles an InfluxQL query to Flux source code.
|
||||
menu:
|
||||
influxdb_cloud_ref:
|
||||
name: influx transpile
|
||||
parent: influx
|
||||
weight: 101
|
||||
influxdb/cloud/tags: [influxql, flux]
|
||||
related:
|
||||
- /influxdb/cloud/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
|
||||
- /influxdb/cloud/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
|
||||
|
|
|
@ -161,7 +161,7 @@ Define a custom polygon region using a record containing the following propertie
|
|||
```
|
||||
|
||||
## GIS geometry definitions
|
||||
Many functions in the Geo package operate on data using geographic information system (GIS) data.
|
||||
Many functions in the Geo package manipulate data based on geographic information system (GIS) data.
|
||||
Define GIS geometry using the following:
|
||||
|
||||
- Any [region type](#region-definitions) _(typically [point](#point))_
|
||||
|
|
|
@ -12,20 +12,4 @@ menu:
|
|||
weight: 103
|
||||
---
|
||||
|
||||
InfluxDB **authentication tokens** ensure secure interaction between users and data.
|
||||
A token belongs to an organization and identifies InfluxDB permissions within the organization.
|
||||
|
||||
Learn how to create, view, update, or delete an authentication token.
|
||||
|
||||
## Authentication token types
|
||||
|
||||
- [All-Access token](#all-access-token)
|
||||
- [Read/Write token](#readwrite-token)
|
||||
|
||||
#### All-Access token
|
||||
Grants full read and write access to all resources in an organization.
|
||||
|
||||
#### Read/Write token
|
||||
Grants read access, write access, or both to specific buckets in an organization.
|
||||
|
||||
{{< children hlevel="h2" >}}
|
||||
{{< duplicate-oss >}}
|
||||
|
|
|
@ -90,10 +90,10 @@ To query InfluxDB Cloud from Grafana using InfluxQL:
|
|||
|
||||
```sh
|
||||
influx config create \
|
||||
--config-name example-config-name \
|
||||
--host-url https://cloud2.influxdata.com \
|
||||
--org example-org \
|
||||
--token My5uP3rSeCr37t0k3n
|
||||
--token My5uP3rSeCr37t0k3n \
|
||||
--name example-config-name
|
||||
```
|
||||
|
||||
For more information about `influx` CLI configurations,
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
---
|
||||
title: Upgrade to InfluxDB Cloud
|
||||
description: >
|
||||
Upgrade to InfluxDB Cloud from InfluxDB OSS 1.x and 2.x and InfluxDB Enterprise 1.x.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Upgrade to Cloud
|
||||
weight: 2
|
||||
---
|
||||
|
||||
Upgrade to InfluxDB Cloud from InfluxDB 1.x and 2.x:
|
||||
|
||||
{{< children >}}
|
|
@ -1,268 +0,0 @@
|
|||
---
|
||||
title: Upgrade from InfluxDB 1.x to InfluxDB Cloud
|
||||
description: >
|
||||
To upgrade from InfluxDB 1.x to InfluxDB Cloud, migrate data, and then create
|
||||
database and retention policy (DBRP) mappings.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
parent: Upgrade to Cloud
|
||||
name: 1.x to Cloud
|
||||
weight: 11
|
||||
related:
|
||||
- /influxdb/cloud/upgrade/v1-to-cloud/migrate-cqs/
|
||||
---
|
||||
|
||||
To upgrade from **InfluxDB OSS 1.x** to **InfluxDB Cloud**:
|
||||
|
||||
1. [Create an InfluxDB Cloud account](#create-an-influxdb-cloud-account)
|
||||
2. [Create an All-Access authentication token](#create-an-all-access-authentication-token)
|
||||
3. [Download and install the `influx` CLI](#download-and-install-the-influx-cli)
|
||||
4. [Create DBRP mappings](#create-dbrp-mappings)
|
||||
5. [Dual write to InfluxDB 1.x and InfluxDB Cloud](#dual-write-to-influxdb-1x-and-influxdb-cloud)
|
||||
6. [Migrate time series data](#migrate-time-series-data)
|
||||
7. [Migrate continuous queries](#migrate-continuous-queries)
|
||||
8. [Collaborate with other users](#collaborate-with-other-users)
|
||||
|
||||
## Create an InfluxDB Cloud account
|
||||
Do one of the following to create an InfluxDB Cloud account:
|
||||
|
||||
- [Subscribe through InfluxData](/influxdb/cloud/get-started/#subscribe-through-influxdata) and
|
||||
[start for free](/influxdb/cloud/get-started/#start-for-free).
|
||||
- [Subscribe through your cloud provider](/influxdb/cloud/get-started/#subscribe-through-a-cloud-provider).
|
||||
|
||||
## Create an All-Access authentication token
|
||||
InfluxDB Cloud requires all requests to be authenticated with **token authentication**.
|
||||
Create an **All-Access** token in your InfluxDB Cloud user interface (UI) to use
|
||||
for the upgrade process.
|
||||
|
||||
1. Click **Data (Load Data) > Tokens** in the left navigation bar.
|
||||
|
||||
{{< nav-icon "data" >}}
|
||||
2. Click **{{< icon "plus" >}} Generate**, and then select **All-Access Token**.
|
||||
3. Enter a description for the token, and then click **{{< icon "check" >}} Save**.
|
||||
|
||||
_For more information about managing tokens and token types, see [Manage tokens](/influxdb/cloud/security/tokens/)._
|
||||
|
||||
## Download and install the influx CLI
|
||||
1. Visit the [InfluxDB downloads page](https://portal.influxdata.com/downloads/)
|
||||
and download the **InfluxDB Cloud CLI** (`influx`).
|
||||
2. Place the `influx` binary in your system `PATH` or execute the CLI commands from
|
||||
the directory where the `influx` CLI exists.
|
||||
|
||||
3. [Create a CLI connection configuration](/influxdb/cloud/reference/cli/influx/#provide-required-authentication-credentials)
|
||||
for your InfluxDB Cloud account.
|
||||
Include the following flags:
|
||||
|
||||
- **-\-config-name**:
|
||||
Unique name for the connection configuration.
|
||||
- **-\-host-url**:
|
||||
[InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/).
|
||||
- **-\-org**:
|
||||
InfluxDB Cloud organization name.
|
||||
The default organization name is the email address associated with your account.
|
||||
- **-\-token**:
|
||||
InfluxDB Cloud **All-Access** token.
|
||||
|
||||
```sh
|
||||
influx config create \
|
||||
--config-name cloud \
|
||||
--host-url https://cloud2.influxdata.com \
|
||||
--org your.email@example.com \
|
||||
--token mY5uP3rS3cRe7Cl0uDt0K3n \
|
||||
--active
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
#### Required InfluxDB Cloud credentials
|
||||
All `influx` CLI examples below assume the required InfluxDB Cloud **host**,
|
||||
**organization**, and **authentication token** credentials are provided by your
|
||||
[`influx` CLI configuration](/influxdb/cloud/reference/cli/influx/#provide-required-authentication-credentials).
|
||||
{{% /note %}}
|
||||
|
||||
## Create DBRP mappings
|
||||
InfluxDB database and retention policy (DBRP) mappings associate database and
|
||||
retention policy combinations with InfluxDB cloud [buckets](/influxdb/cloud/reference/glossary/#bucket).
|
||||
These mappings allow InfluxDB 1.x clients to query and write to
|
||||
InfluxDB Cloud buckets while using the 1.x DBRP convention.
|
||||
|
||||
_For more information about DBRP mapping, see
|
||||
[Database and retention policy mapping](/influxdb/cloud/reference/api/influxdb-1x/dbrp/)._
|
||||
|
||||
**To map a DBRP combination to an InfluxDB Cloud bucket**
|
||||
|
||||
1. **Create a bucket**
|
||||
[Create an InfluxDB Cloud bucket](/influxdb/cloud/organizations/buckets/create-bucket/).
|
||||
We recommend creating a bucket for each unique 1.x database and retention
|
||||
policy combination using the following naming convention:
|
||||
|
||||
```sh
|
||||
# Naming convention
|
||||
db-name/rp-name
|
||||
|
||||
# Example
|
||||
telegraf/autogen
|
||||
```
|
||||
|
||||
2. **Create a DBRP mapping**
|
||||
Use the [`influx v1 dbrp create` command](/influxdb/cloud/reference/cli/influx/v1/dbrp/create/)
|
||||
to create a DBRP mapping.
|
||||
Provide the following:
|
||||
|
||||
- database name
|
||||
- retention policy name _(not retention period)_
|
||||
- [bucket ID](/influxdb/cloud/organizations/buckets/view-buckets/)
|
||||
- _(optional)_ `--default` flag if you want the retention policy to be the default retention
|
||||
policy for the specified database
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[DB with one RP](#)
|
||||
[DB with multiple RPs](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influx v1 dbrp create \
|
||||
--db example-db \
|
||||
--rp example-rp \
|
||||
--bucket-id 00xX00o0X001 \
|
||||
--default
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
# Create telegraf/autogen DBRP mapping with autogen
|
||||
# as the default RP for the telegraf DB
|
||||
|
||||
influx v1 dbrp create \
|
||||
--db telegraf \
|
||||
--rp autogen \
|
||||
--bucket-id 00xX00o0X001 \
|
||||
--default
|
||||
|
||||
# Create telegraf/downsampled-daily DBRP mapping that
|
||||
# writes to a different bucket
|
||||
|
||||
influx v1 dbrp create \
|
||||
--db telegraf \
|
||||
--rp downsampled-daily \
|
||||
--bucket-id 00xX00o0X002
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{% caption %}}
|
||||
See [Required InfluxDB Cloud credentials](#required-influxdb-cloud-credentials)
|
||||
{{% /caption %}}
|
||||
|
||||
## Dual write to InfluxDB 1.x and InfluxDB Cloud
|
||||
Update external clients to write to your InfluxDB Cloud instance.
|
||||
**We recommend writing data to both InfluxDB 1.x and InfluxDB Cloud until you
|
||||
finish [migrating your existing time series data](#migrate-time-series-data)**.
|
||||
|
||||
Configure external clients with your InfluxDB Cloud **host**, **organization**,
|
||||
and **authentication token**.
|
||||
|
||||
### Update Telegraf configurations
|
||||
If using Telegraf to collect and write metrics to InfluxDB 1.x, update your
|
||||
Telegraf configuration to write to both InfluxDB 1.x and InfluxDB Cloud:
|
||||
|
||||
1. Update your Telegraf configuration with an `influxdb_v2` output to write to
|
||||
your InfluxDB Cloud instance.
|
||||
|
||||
##### Example dual-write Telegraf configuration
|
||||
```toml
|
||||
# Write metrics to InfluxDB 1.x
|
||||
[[outputs.influxdb]]
|
||||
urls = ["https://localhost:8086"]
|
||||
database = "example-db"
|
||||
retention_policy = "example-rp"
|
||||
|
||||
# Write metrics to InfluxDB Cloud
|
||||
[[outputs.influxdb_v2]]
|
||||
urls = ["https://cloud2.influxdata.com"]
|
||||
token = "$INFLUX_TOKEN"
|
||||
organization = "your.email@example.com"
|
||||
bucket = "example-db/example-rp"
|
||||
```
|
||||
|
||||
2. Add the `INFLUX_TOKEN` environment variable to your Telegraf environment(s)
|
||||
and set the value to your InfluxDB Cloud authentication token.
|
||||
|
||||
3. Restart Telegraf with the updated configuration and begin writing to both
|
||||
InfluxDB 1.x and InfluxDB Cloud.
|
||||
|
||||
## Migrate time series data
|
||||
To migrate time series data from your InfluxDB 1.x instance to InfluxDB Cloud:
|
||||
|
||||
1. Use the **InfluxDB 1.x** [`influx_inspect export` command](/{{< latest "influxdb" "v1" >}}/tools/influx_inspect/#export)
|
||||
to export time series data as line protocol.
|
||||
Include the `-lponly` flag to exclude comments and the data definition
|
||||
language (DDL) from the output file.
|
||||
|
||||
_We recommend exporting each DBRP combination separately to easily write data
|
||||
to a corresponding InfluxDB Cloud bucket._
|
||||
|
||||
```sh
|
||||
# Syntax
|
||||
influx_inspect export \
|
||||
-database <database-name> \
|
||||
-retention <retention-policy-name> \
|
||||
-out <output-file-path> \
|
||||
-lponly
|
||||
|
||||
# Example
|
||||
influx_inspect export \
|
||||
-database example-db \
|
||||
-retention example-rp \
|
||||
-out /path/to/example-db_example-rp.lp \
|
||||
-lponly
|
||||
```
|
||||
|
||||
2. Use the **InfluxDB Cloud** [`influx write` command](/influxdb/cloud/reference/cli/influx/write/)
|
||||
to write the exported line protocol to InfluxDB Cloud.
|
||||
|
||||
```sh
|
||||
# Syntax
|
||||
influx write \
|
||||
--bucket <bucket-name> \
|
||||
--file <path-to-line-protocol-file>
|
||||
|
||||
# Example
|
||||
influx write \
|
||||
--bucket example-db/example-rp \
|
||||
--file /path/to/example-db_example-rp.lp
|
||||
```
|
||||
{{% caption %}}
|
||||
See [Required InfluxDB Cloud credentials](#required-influxdb-cloud-credentials)
|
||||
{{% /caption %}}
|
||||
|
||||
3. Repeat steps 1-2 for each bucket.
|
||||
|
||||
{{% note %}}
|
||||
#### InfluxDB Cloud write rate limits
|
||||
Write requests are subject to rate limits associated with your
|
||||
[InfluxDB Cloud pricing plan](/influxdb/cloud/account-management/pricing-plans/).
|
||||
If your exported line protocol size potentially exceeds your rate limits,
|
||||
include the `--rate-limit` flag with `influx write` to rate limit written data.
|
||||
|
||||
```sh
|
||||
influx write \
|
||||
--bucket example-bucket \
|
||||
--file /path/to/example-db_example-rp.lp \
|
||||
--rate-limit "5 MB / 5 min"
|
||||
```
|
||||
{{% caption %}}
|
||||
See [Required InfluxDB Cloud credentials](#required-influxdb-cloud-credentials)
|
||||
{{% /caption %}}
|
||||
|
||||
To minimize network bandwidth usage, we recommend using gzip to compress exported line protocol.
|
||||
However, when writing to InfluxDB Cloud, **Data In** and **Ingest batch size**
|
||||
rate limits track the payload size of the **uncompressed** line protocol.
|
||||
{{% /note %}}
|
||||
|
||||
## Migrate continuous queries
|
||||
For information about migrating InfluxDB 1.x continuous queries to InfluxDB Cloud tasks,
|
||||
see [Migrate continuous queries to tasks](/influxdb/cloud/upgrade/v1-to-cloud/migrate-cqs/).
|
||||
|
||||
## Collaborate with other users
|
||||
To collaborate with other users in your InfluxDB Cloud organization,
|
||||
[invite users to join your organization](/influxdb/cloud/account-management/multi-user/invite-user/).
|
|
@ -1,370 +0,0 @@
|
|||
---
|
||||
title: Migrate continuous queries to tasks
|
||||
description: >
|
||||
InfluxDB Cloud replaces 1.x continuous queries (CQs) with **InfluxDB tasks**.
|
||||
To migrate continuous queries to InfluxDB Cloud, convert InfluxDB 1.x CQs into
|
||||
Flux and create new InfluxDB Cloud tasks.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
parent: 1.x to Cloud
|
||||
name: Migrate CQs
|
||||
weight: 102
|
||||
related:
|
||||
- /influxdb/cloud/query-data/get-started/
|
||||
- /influxdb/cloud/query-data/flux/
|
||||
- /influxdb/cloud/process-data/
|
||||
- /influxdb/cloud/process-data/common-tasks/
|
||||
- /influxdb/cloud/reference/flux/flux-vs-influxql/
|
||||
---
|
||||
|
||||
InfluxDB Cloud replaces 1.x continuous queries (CQs) with **InfluxDB tasks**.
|
||||
To migrate continuous queries to InfluxDB Cloud tasks, do the following:
|
||||
|
||||
1. [Output all InfluxDB 1.x continuous queries](#output-all-influxdb-1x-continuous-queries)
|
||||
2. [Convert continuous queries to Flux queries](#convert-continuous-queries-to-flux-queries)
|
||||
3. [Create new InfluxDB tasks](#create-new-influxdb-tasks)
|
||||
|
||||
## Output all InfluxDB 1.x continuous queries
|
||||
|
||||
1. Use the **InfluxDB 1.x `influx` interactive shell** to run `SHOW CONTINUOUS QUERIES`:
|
||||
|
||||
{{< keep-url >}}
|
||||
```sh
|
||||
$ influx
|
||||
Connected to http://localhost:8086 version 1.8.5
|
||||
InfluxDB shell version: 1.8.5
|
||||
> SHOW CONTINUOUS QUERIES
|
||||
```
|
||||
|
||||
2. Copy and save the displayed continuous queries.
|
||||
|
||||
## Convert continuous queries to Flux queries
|
||||
|
||||
To migrate InfluxDB 1.x continuous queries to InfluxDB Cloud tasks, convert the InfluxQL query syntax to Flux.
|
||||
The majority of continuous queries are simple downsampling queries and can be converted quickly
|
||||
using the [`aggregateWindow()` function](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow/).
|
||||
For example:
|
||||
|
||||
##### Example continuous query
|
||||
```sql
|
||||
CREATE CONTINUOUS QUERY "downsample-daily" ON "my-db"
|
||||
BEGIN
|
||||
SELECT mean("example-field")
|
||||
INTO "my-db"."example-rp"."average-example-measurement"
|
||||
FROM "example-measurement"
|
||||
GROUP BY time(1h)
|
||||
END
|
||||
```
|
||||
|
||||
##### Equivalent Flux task
|
||||
```js
|
||||
option task = {
|
||||
name: "downsample-daily",
|
||||
every: 1d
|
||||
}
|
||||
|
||||
from(bucket: "my-db/")
|
||||
|> range(start: -task.every)
|
||||
|> filter(fn: (r) => r._measurement == "example-measurement")
|
||||
|> filter(fn: (r) => r._field == "example-field")
|
||||
|> aggregateWindow(every: 1h, fn: mean)
|
||||
|> set(key: "_measurement", value: "average-example-measurement")
|
||||
|> to(
|
||||
org: "example-org",
|
||||
bucket: "my-db/example-rp"
|
||||
)
|
||||
```
|
||||
|
||||
### Convert InfluxQL continuous queries to Flux
|
||||
Review the following statements and clauses to see how to convert your CQs to Flux:
|
||||
|
||||
- [ON clause](#on-clause)
|
||||
- [SELECT statement](#select-statement)
|
||||
- [INTO clause](#into-clause)
|
||||
- [FROM clause](#from-clause)
|
||||
- [AS clause](#as-clause)
|
||||
- [WHERE clause](#where-clause)
|
||||
- [GROUP BY clause](#group-by-clause)
|
||||
- [RESAMPLE clause](#resample-clause)
|
||||
|
||||
#### ON clause
|
||||
The `ON` clause defines the database to query.
|
||||
In InfluxDB Cloud, database and retention policy combinations are mapped to specific buckets
|
||||
(for more information, see [Database and retention policy mapping](/influxdb/cloud/reference/api/influxdb-1x/dbrp/)).
|
||||
|
||||
Use the [`from()` function](/influxdb/cloud/reference/flux/stdlib/built-in/inputs/from)
|
||||
to specify the bucket to query:
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
CREATE CONTINUOUS QUERY "downsample-daily" ON "my-db"
|
||||
-- ...
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
from(bucket: "my-db/")
|
||||
// ...
|
||||
```
|
||||
|
||||
#### SELECT statement
|
||||
The `SELECT` statement queries data by field, tag, and time from a specific measurement.
|
||||
`SELECT` statements can take many different forms and converting them to Flux depends
|
||||
on your use case. For information about Flux and InfluxQL function parity, see
|
||||
[Flux vs InfluxQL](/influxdb/cloud/reference/flux/flux-vs-influxql/#influxql-and-flux-parity).
|
||||
See [other resources available to help](#other-helpful-resources).
|
||||
|
||||
#### INTO clause
|
||||
The `INTO` clause defines the measurement to write results to.
|
||||
`INTO` also supports fully-qualified measurements that include the database and retention policy.
|
||||
In InfluxDB Cloud, database and retention policy combinations are mapped to specific buckets
|
||||
(for more information, see [Database and retention policy mapping](/influxdb/cloud/reference/api/influxdb-1x/dbrp/)).
|
||||
|
||||
To write to a measurement different than the measurement queried, use
|
||||
[`set()`](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/set/) or
|
||||
[`map()`](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/map/)
|
||||
to change the measurement name.
|
||||
Use the `to()` function to specify the bucket to write results to.
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
-- ...
|
||||
INTO "example-db"."example-rp"."example-measurement"
|
||||
-- ...
|
||||
```
|
||||
|
||||
###### Flux
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[set()](#)
|
||||
[map()](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```js
|
||||
// ...
|
||||
|> set(key: "_measurement", value: "example-measurement")
|
||||
|> to(bucket: "example-db/example-rp")
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```js
|
||||
// ...
|
||||
|> map(fn: (r) => ({ r with _measurement: "example-measurement"}))
|
||||
|> to(bucket: "example-db/example-rp")
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
##### Write pivoted data to InfluxDB
|
||||
InfluxDB 1.x query results include a column for each field.
|
||||
InfluxDB Cloud does not do this by default, but it is possible with
|
||||
[`pivot()`](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/pivot)
|
||||
or [`schema.fieldsAsCols()`](/influxdb/cloud/reference/flux/stdlib/influxdb-schema/fieldsascols/).
|
||||
|
||||
If you use `to()` to write _pivoted data_ back to InfluxDB Cloud, each field column is stored as a tag.
|
||||
To write pivoted fields back to InfluxDB as fields, import the `experimental` package
|
||||
and use the [`experimental.to()` function](/influxdb/cloud/reference/flux/stdlib/experimental/to/).
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
CREATE CONTINUOUS QUERY "downsample-daily" ON "my-db"
|
||||
BEGIN
|
||||
SELECT mean("example-field-1"), mean("example-field-2")
|
||||
INTO "example-db"."example-rp"."example-measurement"
|
||||
FROM "example-measurement"
|
||||
GROUP BY time(1h)
|
||||
END
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
// ...
|
||||
|
||||
from(bucket: "my-db/")
|
||||
|> range(start: -task.every)
|
||||
|> filter(fn: (r) => r._measurement == "example-measurement")
|
||||
|> filter(fn: (r) => r._field == "example-field-1" or r._field == "example-field-2")
|
||||
|> aggregateWindow(every: task.every, fn: mean)
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|> experimental.to(bucket: "example-db/example-rp")
|
||||
```
|
||||
|
||||
#### FROM clause
|
||||
The `FROM` clause defines the measurement to query.
|
||||
Use the [`filter()` function](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/filter/)
|
||||
to specify the measurement to query.
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
-- ...
|
||||
FROM "example-measurement"
|
||||
-- ...
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
// ...
|
||||
|> filter(fn: (r) => r._measurement == "example-measurement")
|
||||
```
|
||||
|
||||
#### AS clause
|
||||
The `AS` clause changes the name of the field when writing data back to InfluxDB.
|
||||
Use [`set()`](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/set/)
|
||||
or [`map()`](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/map/)
|
||||
to change the field name.
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
-- ...
|
||||
AS newfield
|
||||
-- ...
|
||||
```
|
||||
|
||||
###### Flux
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[set()](#)
|
||||
[map()](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```js
|
||||
// ...
|
||||
|> set(key: "_field", value: "newfield")
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```js
|
||||
// ...
|
||||
|> map(fn: (r) => ({ r with _field: "newfield"}))
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
#### WHERE clause
|
||||
The `WHERE` clause uses predicate logic to filter results based on fields, tags, or timestamps.
|
||||
Use the [`filter()` function](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/filter/)
|
||||
and Flux [comparison operators](/influxdb/cloud/reference/flux/language/operators/#comparison-operators)
|
||||
to filter results based on fields and tags.
|
||||
Use the [`range()` function](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/range/) to filter results based on timestamps.
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
-- ...
|
||||
WHERE "example-tag" = "foo" AND time > now() - 7d
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
// ...
|
||||
|> range(start: -7d)
|
||||
|> filter(fn: (r) => r["example-tag"] == "foo")
|
||||
```
|
||||
|
||||
#### GROUP BY clause
|
||||
The InfluxQL `GROUP BY` clause groups data by specific tags or by time (typically to calculate an aggregate value for windows of time).
|
||||
|
||||
##### Group by tags
|
||||
Use the [`group()` function](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/group/)
|
||||
to modify the [group key](/influxdb/cloud/reference/glossary/#group-key) and change how data is grouped.
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
-- ...
|
||||
GROUP BY "location"
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
// ...
|
||||
|> group(columns: ["location"])
|
||||
```
|
||||
|
||||
##### Group by time
|
||||
Use the [`aggregateWindow()` function](/influxdb/cloud/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow/)
|
||||
to group data into time windows and perform an aggregation on each window.
|
||||
In CQs, the interval specified in the `GROUP BY time()` clause determines the CQ execution interval.
|
||||
Use the `GROUP BY time()` interval to set the `every` task option.
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
-- ...
|
||||
SELECT MEAN("example-field")
|
||||
FROM "example-measurement"
|
||||
GROUP BY time(1h)
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
option task = {
|
||||
name: "task-name",
|
||||
every: 1h
|
||||
}
|
||||
|
||||
// ...
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "example-measurement" and
|
||||
r._field == "example-field"
|
||||
)
|
||||
|> aggregateWindow(every: task.every, fn: mean)
|
||||
```
|
||||
|
||||
#### RESAMPLE clause
|
||||
|
||||
The CQ `RESAMPLE` clause uses data from the last specified duration to calculate a new aggregate point.
|
||||
The `EVERY` interval in `RESAMPLE` defines how often the CQ runs.
|
||||
The `FOR` interval defines the total time range queried by the CQ.
|
||||
|
||||
To accomplish this same functionality in a Flux task, set the `start` parameter
|
||||
in the `range()` function to the negative `FOR` duration.
|
||||
Define the task execution interval in the `task` options.
|
||||
For example:
|
||||
|
||||
###### InfluxQL
|
||||
```sql
|
||||
CREATE CONTINUOUS QUERY "resample-example" ON "my-db"
|
||||
RESAMPLE EVERY 1m FOR 30m
|
||||
BEGIN
|
||||
SELECT exponential_moving_average(mean("example-field"), 30)
|
||||
INTO "resample-average-example-measurement"
|
||||
FROM "example-measurement"
|
||||
WHERE region = 'example-region'
|
||||
GROUP BY time(1m)
|
||||
END
|
||||
```
|
||||
|
||||
###### Flux
|
||||
```js
|
||||
option task = {
|
||||
name: "resample-example",
|
||||
every: 1m
|
||||
}
|
||||
|
||||
from(bucket: "my-db/")
|
||||
|> range(start: -30m)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "example-measurement" and
|
||||
r._field == "example-field" and
|
||||
r.region == "example-region"
|
||||
)
|
||||
|> aggregateWindow(every: 1m, fn: mean)
|
||||
|> exponentialMovingAverage(n: 30)
|
||||
|> set(key: "_measurement", value: "resample-average-example-measurement")
|
||||
|> to(bucket: "my-db/")
|
||||
```
|
||||
|
||||
## Create new InfluxDB tasks
|
||||
After converting your continuous query to Flux, use the Flux query to
|
||||
[create a new task](/influxdb/cloud/process-data/manage-tasks/create-task/).
|
||||
|
||||
## Other helpful resources
|
||||
The following resources are available and may be helpful when converting
|
||||
continuous queries to Flux tasks.
|
||||
|
||||
##### Documentation
|
||||
- [Get started with Flux](/influxdb/cloud/query-data/get-started/)
|
||||
- [Query data with Flux](/influxdb/cloud/query-data/flux/)
|
||||
- [Common tasks](/influxdb/cloud/process-data/common-tasks/#downsample-data-with-influxdb)
|
||||
|
||||
##### Community
|
||||
- Post in the [InfluxData Community](https://community.influxdata.com/)
|
||||
- Ask in the [InfluxDB Community Slack](https://influxdata.com/slack)
|
|
@ -1,368 +0,0 @@
|
|||
---
|
||||
title: Upgrade from InfluxDB OSS 2.x to InfluxDB Cloud
|
||||
description: >
|
||||
To upgrade from Influx 2.x to InfluxDB Cloud, create a new InfluxDB Cloud account,
|
||||
migrate resources, time series data, and more.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: 2.x to Cloud
|
||||
parent: Upgrade to Cloud
|
||||
weight: 11
|
||||
---
|
||||
|
||||
To upgrade from **InfluxDB OSS 2.x** to **InfluxDB Cloud**:
|
||||
|
||||
1. [Create an InfluxDB Cloud account](#create-an-influxdb-cloud-account)
|
||||
2. [Create an All-Access authentication token](#create-an-all-access-authentication-token)
|
||||
3. [Set up influx CLI connection configurations](#set-up-influx-cli-connection-configurations)
|
||||
4. [Use templates to migrate InfluxDB resources](#use-templates-to-migrate-influxdb-resources)
|
||||
5. [Migrate DBRP mappings](#migrate-dbrp-mappings)
|
||||
6. [Dual write to InfluxDB 2.x and InfluxDB Cloud](#dual-write-to-influxdb-2x-and-influxdb-cloud)
|
||||
7. [Migrate time series data](#migrate-time-series-data)
|
||||
8. [Collaborate with other users](#collaborate-with-other-users)
|
||||
|
||||
{{% note %}}
|
||||
#### Consider when upgrading
|
||||
- InfluxDB Cloud requires token authentication, and you must create all new authentication tokens.
|
||||
- InfluxDB Cloud does not support:
|
||||
- Multiple [organizations](/influxdb/cloud/reference/glossary/#organization) per account.
|
||||
Upgrade a single InfluxDB OSS 2.x organization to an InfluxDB Cloud organization.
|
||||
To upgrade multiple organizations, create a separate InfluxDB Cloud account for each organization.
|
||||
- [InfluxDB scrapers](/{{< latest "influxdb" >}}/write-data/no-code/scrape-data/).
|
||||
To scrape Prometheus-formatted metrics, use the [Telegraf Prometheus input plugin](/{{< latest "telegraf" >}}/plugins/#prometheus).
|
||||
- [1.x compatible authorizations](/{{< latest "influxdb" >}}/reference/api/influxdb-1x/#authentication).
|
||||
{{% /note %}}
|
||||
|
||||
## Create an InfluxDB Cloud account
|
||||
Do one of the following to create an InfluxDB Cloud account:
|
||||
|
||||
- [Subscribe through InfluxData](/influxdb/cloud/get-started/#subscribe-through-influxdata) and
|
||||
[start for free](/influxdb/cloud/get-started/#start-for-free).
|
||||
- [Subscribe through your cloud provider](/influxdb/cloud/get-started/#subscribe-through-a-cloud-provider).
|
||||
|
||||
## Create an All-Access authentication token
|
||||
InfluxDB authentication tokens are unique to each organization.
|
||||
Create an **All-Access** token in your InfluxDB Cloud user interface (UI) to use
|
||||
for the upgrade process.
|
||||
|
||||
1. Click **Data (Load Data) > Tokens** in the left navigation bar.
|
||||
|
||||
{{< nav-icon "data" >}}
|
||||
2. Click **{{< icon "plus" >}} Generate**, and then select **All-Access Token**.
|
||||
3. Enter a description for the token, and then click **{{< icon "check" >}} Save**.
|
||||
|
||||
{{% note %}}
|
||||
If you've created other tokens in your InfluxDB 2.x instance for external libraries or
|
||||
integrations, create corresponding tokens for each in your InfluxDB Cloud instance.
|
||||
You cannot migrate tokens from InfluxDB 2.x to InfluxDB Cloud.
|
||||
{{% /note %}}
|
||||
|
||||
_For more information about managing tokens and token types, see [Manage tokens](/influxdb/cloud/security/tokens/)._
|
||||
|
||||
## Set up influx CLI connection configurations
|
||||
The `influx` command line interface (CLI) lets you create connection configurations
|
||||
that automatically provide **host**, **organization**, and **authentication token**
|
||||
credentials to CLI commands.
|
||||
Use the `influx` CLI packaged with InfluxDB 2.x and the
|
||||
[`influx config create` command](/influxdb/cloud/reference/cli/influx/config/create/)
|
||||
to set up the connection configurations for both your InfluxDB Cloud instance and
|
||||
your InfluxDB 2.x instance.
|
||||
|
||||
Include the following flags for each configuration:
|
||||
|
||||
- **-\-config-name**:
|
||||
Unique name for the connection configuration.
|
||||
The examples below use `cloud` and `oss` respectively.
|
||||
- **-\-host-url**:
|
||||
[InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/) or
|
||||
[InfluxDB 2.x URL](/{{< latest "influxdb" >}}/reference/urls/).
|
||||
- **-\-org**:
|
||||
InfluxDB organization name.
|
||||
The default organization name in InfluxDB Cloud is the email address associated with your account.
|
||||
- **-\-token**: Authentication token to use to connect to InfluxDB.
|
||||
Provide an **All-Access** token (or an [Operator token](/{{< latest "influxdb" >}}/security/tokens/#operator-token) for 2.x).
|
||||
|
||||
##### Create an InfluxDB Cloud connection configuration
|
||||
```sh
|
||||
# Example cloud connection configuration
|
||||
influx config create \
|
||||
--config-name cloud \
|
||||
--host-url https://cloud2.influxdata.com \
|
||||
--org your.email@example.com \
|
||||
--token mY5uP3rS3cRe7Cl0uDt0K3n
|
||||
```
|
||||
|
||||
##### Create an InfluxDB 2.x connection configuration
|
||||
{{< keep-url >}}
|
||||
```sh
|
||||
# Example 2.x connection configuration
|
||||
influx config create \
|
||||
--config-name oss \
|
||||
--host-url http://localhost:8086 \
|
||||
--org example-org \
|
||||
--token mY5uP3rS3cRe70S5t0K3n
|
||||
```
|
||||
|
||||
## Use templates to migrate InfluxDB resources
|
||||
[InfluxDB templates](/influxdb/cloud/influxdb-templates/) let you export InfluxDB
|
||||
[resources](/influxdb/cloud/influxdb-templates/#template-resources) such as buckets,
|
||||
dashboards, labels, tasks, and more and import them into another InfluxDB instance.
|
||||
Export resources from your **InfluxDB 2.x** instance and migrate them to
|
||||
your **InfluxDB Cloud** instance.
|
||||
|
||||
{{% note %}}
|
||||
#### InfluxDB Cloud Free Plan resource limits
|
||||
If upgrading to an [InfluxDB Cloud Free Plan](/influxdb/cloud/account-management/pricing-plans/#free-plan),
|
||||
you are only able to create a limited number of resources.
|
||||
If your exported template exceeds these limits, the resource migration will fail.
|
||||
{{% /note %}}
|
||||
|
||||
- **To migrate _all resources_ from an InfluxDB 2.x organization to an InfluxDB Cloud organization**:
|
||||
Use the [`influx export all` command](/influxdb/cloud/reference/cli/influx/export/all)
|
||||
and pipe the output into the [`influx apply` command](/influxdb/cloud/reference/cli/influx/apply/).
|
||||
Use the `--active-config` flag with each command to specify which connection configuration to use:
|
||||
|
||||
```sh
|
||||
influx export all --active-config oss | influx apply --active-config cloud
|
||||
```
|
||||
|
||||
- **To migrate _specific resources_ from an InfluxDB 2.x organization to an InfluxDB Cloud organization**:
|
||||
Use the [`influx export` command](/influxdb/cloud/reference/cli/influx/export/)
|
||||
with lists of specific resources to export or the [`influx export all` command](/influxdb/cloud/reference/cli/influx/export/all)
|
||||
**with filters**. Pipe the output into the [`influx apply` command](/influxdb/cloud/reference/cli/influx/apply/).
|
||||
Use the `--active-config` flag with each command to specify which connection configuration to use:
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[Migrate specific resources](#)
|
||||
[Migrate all with filters](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influx export \
|
||||
--active-config oss \
|
||||
--buckets 0Xx0oox00XXoxxoo1,0Xx0oox00XXoxxoo2 \
|
||||
--labels o0x0oox0Xxoxx001,o0x0oox0Xxoxx002 \
|
||||
--dashboards 0XxXooXoo0xooXo0X1,0XxXooXoo0xooXo0X2 | \
|
||||
influx apply --active-config cloud
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influx export all \
|
||||
--active-config oss \
|
||||
--filter=resourceKind=Bucket \
|
||||
--filter=resourceKind=Dashboard \
|
||||
--filter=labelName=Foo | \
|
||||
influx apply --active-config cloud
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
For more export command examples, see the [`influx export`](/influxdb/cloud/reference/cli/influx/export/#examples)
|
||||
and [`influx export all`](/influxdb/cloud/reference/cli/influx/export/all#examples) documentation.
|
||||
|
||||
{{% note %}}
|
||||
#### Update hardcoded InfluxDB URLs
|
||||
If any of your migrated resources contain hardcoded InfluxDB URLs (`http://localhost:8086`), do one of the following to update these URLs to your [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/):
|
||||
|
||||
- Migrate your resources to InfluxDB Cloud, and then update URLs in the InfluxDB Cloud UI.
|
||||
- Save your template to a file, update URLs in the file, and then apply the template to your InfluxDB Cloud instance.
|
||||
{{% /note %}}
|
||||
|
||||
## Migrate DBRP mappings
|
||||
InfluxDB database and retention policy (DBRP) mappings let you query InfluxDB Cloud
|
||||
buckets with InfluxQL and the InfluxDB 1.x DBRP convention.
|
||||
**If you have DBRP mappings in your InfluxDB 2.x instance**, migrate them
|
||||
to your InfluxDB Cloud instance.
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "Migrate DBRP mappings to InfluxDB Cloud"%}}
|
||||
1. Use the [`influx v1 dbrp list` command](/influxdb/cloud/reference/cli/influx/influx/v1/dbrp/list/)
|
||||
to view the list of DBRP mappings in your **InfluxDB 2.x** instance.
|
||||
|
||||
```sh
|
||||
influx v1 dbrp list --active-config oss
|
||||
```
|
||||
|
||||
2. Use the [`influx bucket list` command](/influxdb/cloud/reference/cli/influx/bucket/list/)
|
||||
to view a list of your **InfluxDB Cloud** buckets and their IDs.
|
||||
|
||||
```sh
|
||||
influx bucket list --active-config cloud
|
||||
```
|
||||
|
||||
3. Use the [`influx v1 dbrp create` command](/influxdb/cloud/reference/cli/influx/influx/v1/dbrp/create/)
|
||||
to create DBRP mappings in your **InfluxDB Cloud** instance that map DBRP
|
||||
combinations to the appropriate bucket ID.
|
||||
|
||||
```sh
|
||||
influx v1 dbrp create \
|
||||
--active-config cloud \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--database example-db \
|
||||
--rp example-rp
|
||||
```
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
## Dual write to InfluxDB 2.x and InfluxDB Cloud
|
||||
Update external clients to write to your InfluxDB Cloud instance.
|
||||
**We recommend writing data to both InfluxDB 2.x and InfluxDB Cloud until you
|
||||
finish [migrating your existing time series data](#migrate-time-series-data)**.
|
||||
|
||||
Configure external clients with your InfluxDB Cloud **host**, **organization**,
|
||||
and **authentication token**.
|
||||
|
||||
### Update Telegraf configurations
|
||||
If using Telegraf configurations migrated to or stored in InfluxDB Cloud,
|
||||
[update your Telegraf configurations](/influxdb/cloud/telegraf-configs/update/)
|
||||
**in InfluxDB Cloud** to write to both InfluxDB 2.x and InfluxDB Cloud:
|
||||
|
||||
1. [Update your Telegraf configuration](/influxdb/cloud/telegraf-configs/update/)
|
||||
with a second `influxdb_v2` output to write to your InfluxDB Cloud instance.
|
||||
|
||||
##### Example dual-write Telegraf configuration
|
||||
```toml
|
||||
# Write metrics to InfluxDB 2.x
|
||||
[[outputs.influxdb_v2]]
|
||||
urls = ["https://localhost:8086"]
|
||||
token = "$INFLUX_TOKEN"
|
||||
organization = "example-org"
|
||||
bucket = "example-bucket"
|
||||
|
||||
# Write metrics to InfluxDB Cloud
|
||||
[[outputs.influxdb_v2]]
|
||||
urls = ["https://cloud2.influxdata.com"]
|
||||
token = "$INFLUX_CLOUD_TOKEN"
|
||||
organization = "your.email@example.com"
|
||||
bucket = "example-bucket"
|
||||
```
|
||||
|
||||
2. Add the following environment variables to your Telegraf environment(s):
|
||||
|
||||
- `INFLUX_TOKEN`: InfluxDB 2.x authentication token
|
||||
- `INFLUX_CLOUD_TOKEN`: InfluxDB Cloud authentication token
|
||||
|
||||
3. Use the command provided in your [Telegraf Setup Instructions](/influxdb/cloud/telegraf-configs/#use-influxdb-telegraf-configurations)
|
||||
to restart Telegraf with the updated configuration and begin writing to both
|
||||
InfluxDB 2.x and InfluxDB Cloud.
|
||||
|
||||
## Migrate time series data
|
||||
To migrate your time series data from your InfluxDB 2.x instance to your
|
||||
InfluxDB Cloud instance, do the following:
|
||||
|
||||
1. Use the [`influx bucket list` command](/influxdb/cloud/reference/cli/influx/bucket/list/)
|
||||
to view a list of your **InfluxDB 2.x** buckets and their IDs.
|
||||
|
||||
```sh
|
||||
influx bucket list --active-config oss
|
||||
```
|
||||
|
||||
2. Use the [`influxd inspect export-lp` command](/influxdb/v2.0/reference/cli/influxd/inspect/export-lp/)
|
||||
to export data from a bucket in your **InfluxDB 2.x** instance as line protocol.
|
||||
Include the following flags:
|
||||
|
||||
- **-\-bucket-id**: Bucket ID to export
|
||||
- **-\-engine-path**: InfluxDB [engine path](/{{< latest "influxdb" >}}/reference/internals/file-system-layout/#engine-path)
|
||||
- **-\-output-path**: Output file path
|
||||
- **-\-compress**: _(Optional)_ Gzip the exported line protocol
|
||||
- **-\-start**: _(Optional)_ Earliest timestamp to export
|
||||
- **-\-end**: _(Optional)_ Latest timestamp to export
|
||||
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--compress \
|
||||
--output-path path/to/bucket-export.lp
|
||||
```
|
||||
|
||||
3. Use the [`influx write` command](/influxdb/cloud/reference/cli/influx/write/)
|
||||
to write your exported line protocol to your **InfluxDB Cloud** instance.
|
||||
Provide the following:
|
||||
|
||||
- **-\-bucket**: Target bucket name
|
||||
_OR_
|
||||
**-\-bucket-id**: Target bucket ID
|
||||
- **-\-compression**: _(Optional)_ `gzip` if the exported line protocol is compressed
|
||||
- **-\-file**: Import file path
|
||||
|
||||
```sh
|
||||
influx write \
|
||||
--active-config cloud \
|
||||
--bucket example-bucket \
|
||||
--compression gzip \
|
||||
--file path/to/bucket-export.lp
|
||||
```
|
||||
|
||||
4. Repeat steps 2-3 for each bucket.
|
||||
|
||||
{{% note %}}
|
||||
#### InfluxDB Cloud write rate limits
|
||||
Write requests are subject to rate limits associated with your
|
||||
[InfluxDB Cloud pricing plan](/influxdb/cloud/account-management/pricing-plans/).
|
||||
If your exported line protocol size potentially exceeds your rate limits,
|
||||
consider doing one of the following:
|
||||
|
||||
- Include the `--rate-limit` flag with `influx write` to rate limit written data.
|
||||
|
||||
```sh
|
||||
influx write \
|
||||
--active-config cloud \
|
||||
--bucket example-bucket \
|
||||
--file path/to/bucket-export.lp \
|
||||
--rate-limit "5 MB / 5 min"
|
||||
```
|
||||
|
||||
- Include `--start` and `--end` flags with `influxd inspect export-lp` to limit
|
||||
exported data by time and then sequentially write the consecutive time ranges.
|
||||
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--start 2021-01-01T00:00:00Z \
|
||||
--end 2021-02-01T00:00:00Z \
|
||||
--compress \
|
||||
--output-path path/to/example-bucket-jan-2021.lp
|
||||
```
|
||||
|
||||
To minimize network bandwidth usage, we recommend using gzip to compress exported line protocol.
|
||||
However, when writing to InfluxDB Cloud, **Data In** and **Ingest batch size**
|
||||
rate limits track the payload size of the **uncompressed** line protocol.
|
||||
{{% /note %}}
|
||||
|
||||
#### Migrate system buckets
|
||||
InfluxDB [system buckets](/influxdb/cloud/reference/internals/system-buckets/)
|
||||
contain data related to the InfluxDB monitoring and alerting system.
|
||||
Although the retention period for system buckets in both InfluxDB Cloud and
|
||||
InfluxDB 2.x is only seven days, if you want to migrate this data,
|
||||
use the same method described above [to migrate time series data](#migrate-time-series-data).
|
||||
|
||||
#### Export and write data in a single command
|
||||
If your data and rate limits allow, you can export and write data in a single
|
||||
command without writing a line protocol export file to disk.
|
||||
The `influxd inspect export-lp` command can output to **stdout** and the `influx write`
|
||||
command accepts line protocol from **stdin**.
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "Export and write data" %}}
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--compress \
|
||||
--output-path - | \
|
||||
influx write \
|
||||
--active-config cloud \
|
||||
--bucket example-bucket \
|
||||
--compression gzip \
|
||||
--rate-limit "5 MB / 5 min"
|
||||
```
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
## Collaborate with other users
|
||||
To collaborate with other users in your InfluxDB Cloud organization,
|
||||
[invite users to join your organization](/influxdb/cloud/account-management/multi-user/invite-user/).
|
|
@ -37,11 +37,6 @@ Click the timezone dropdown to select a timezone to use for the dashboard. Selec
|
|||
|
||||
Click the refresh button (**{{< icon "refresh" >}}**) to manually refresh the dashboard's data.
|
||||
|
||||
#### Refresh a single dashboard cell
|
||||
|
||||
1. Click the **{{< icon "gear" >}}** on the dashboard cell you want to refresh.
|
||||
2. Click **{{< icon "refresh" >}} Refresh**.
|
||||
|
||||
### Select time range
|
||||
|
||||
1. Select from the time range options in the dropdown menu.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
---
|
||||
title: Band visualization
|
||||
title: Band Plot visualization
|
||||
list_title: Band
|
||||
list_image: /img/influxdb/2-0-visualizations-Band-example.png
|
||||
description:
|
||||
weight: 201
|
||||
weight: 206
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Band
|
||||
|
|
|
@ -4,11 +4,64 @@ list_title: Gauge
|
|||
list_image: /img/influxdb/2-0-visualizations-gauge-example.png
|
||||
description: >
|
||||
The Gauge view displays the single most recent value for a time series in a gauge.
|
||||
weight: 201
|
||||
weight: 206
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Gauge
|
||||
parent: Visualization types
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The **Gauge** visualization displays the most recent value for a time series in a gauge.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-gauge-example-8.png" alt="Gauge example" />}}
|
||||
|
||||
Select the **Gauge** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Gauge behavior
|
||||
The gauge visualization displays a single numeric data point within a defined spectrum (_default is 0-100_).
|
||||
It uses the latest point in the first table (or series) returned by the query.
|
||||
|
||||
{{% note %}}
|
||||
#### Queries should return one table
|
||||
Flux does not guarantee the order in which tables are returned.
|
||||
If a query returns multiple tables (or series), the table order can change between query executions
|
||||
and result in the Gauge displaying inconsistent data.
|
||||
For consistent results, the Gauge query should return a single table.
|
||||
{{% /note %}}
|
||||
|
||||
## Gauge Controls
|
||||
To view **Gauge** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
- **Prefix**: Prefix to add to the gauge.
|
||||
- **Suffix**: Suffix to add to the gauge.
|
||||
- **Decimal Places**: The number of decimal places to display for the gauge.
|
||||
- **Auto** or **Custom**: Enable or disable auto-setting.
|
||||
|
||||
###### Colorized Thresholds
|
||||
- **Base Color**: Select a base or background color from the selection list.
|
||||
- **Add a Threshold**: Change the color of the gauge based on the current value.
|
||||
- **Value is**: Enter the value at which the gauge should appear in the selected color.
|
||||
Choose a color from the dropdown menu next to the value.
|
||||
|
||||
## Gauge examples
|
||||
Gauge visualizations are useful for showing the current value of a metric and displaying
|
||||
where it falls within a spectrum.
|
||||
|
||||
### Steam pressure gauge
|
||||
The following example queries sensor data that tracks the pressure of steam pipes
|
||||
in a facility and displays it as a gauge.
|
||||
|
||||
###### Query pressure data from a specific sensor
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: -1m)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "steam-sensors" and
|
||||
r._field == "psi" and
|
||||
r.sensorID == "a211i"
|
||||
)
|
||||
```
|
||||
|
||||
###### Visualization options for pressure gauge
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-gauge-pressure-8.png" alt="Pressure gauge example" />}}
|
||||
|
|
|
@ -15,4 +15,82 @@ related:
|
|||
- /influxdb/cloud/visualize-data/visualization-types/single-stat
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The **Graph + Single Stat** view displays the specified time series in a line graph
|
||||
and overlays the single most recent value as a large numeric value.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-single-stat-example-8.png" alt="Line Graph + Single Stat example" />}}
|
||||
|
||||
Select the **Graph + Single Stat** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Graph + Single Stat behavior
|
||||
The Graph visualization color codes each table (or series) in the queried data set.
|
||||
When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
|
||||
|
||||
The Single Stat visualization displays a single numeric data point.
|
||||
It uses the latest point in the first table (or series) returned by the query.
|
||||
|
||||
{{% note %}}
|
||||
#### Queries should return one table
|
||||
Flux does not guarantee the order in which tables are returned.
|
||||
If a query returns multiple tables (or series), the table order can change between query executions
|
||||
and result in the Single Stat visualization displaying inconsistent data.
|
||||
For consistent Single Stat results, the query should return a single table.
|
||||
{{% /note %}}
|
||||
|
||||
## Graph + Single Stat Controls
|
||||
To view **Graph + Single Stat** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
###### Data
|
||||
- **X Column**: Select a column to display on the x-axis.
|
||||
- **Y Column**: Select a column to display on the y-axis.
|
||||
|
||||
###### Options
|
||||
- **Line Colors**: Select a color scheme to use for your graph.
|
||||
- **Shade Area Below Lines**: Shade in the area below the graph lines.
|
||||
|
||||
###### Y Axis
|
||||
- **Y Axis Label**: Label for the y-axis.
|
||||
- **Y Tick Prefix**: Prefix to be added to y-value.
|
||||
- **Y Tick Suffix**: Suffix to be added to y-value.
|
||||
- **Y Axis Domain**: The y-axis value range.
|
||||
- **Auto**: Automatically determine the value range based on values in the data set.
|
||||
- **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both.
|
||||
- **Min**: Minimum y-axis value.
|
||||
- **Max**: Maximum y-axis value.
|
||||
- **Positioning**:
|
||||
- **Overlaid**: Display graph lines overlaid on each other.
|
||||
- **Stacked**: Display graph lines stacked on top of each other.
|
||||
|
||||
###### Customize Single-Stat
|
||||
- **Prefix**: Prefix to be added to the single stat.
|
||||
- **Suffix**: Suffix to be added to the single stat.
|
||||
- **Decimal Places**: The number of decimal places to display for the single stat.
|
||||
- **Auto** or **Custom**: Enable or disable auto-setting.
|
||||
|
||||
###### Colorized Thresholds
|
||||
- **Base Color**: Select a base or background color from the selection list.
|
||||
- **Add a Threshold**: Change the color of the single stat based on the current value.
|
||||
- **Value is**: Enter the value at which the single stat should appear in the selected color.
|
||||
Choose a color from the dropdown menu next to the value.
|
||||
- **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds.
|
||||
Choose **Background** for the background of the graph to change color based on the configured thresholds.
|
||||
|
||||
## Graph + Single Stat examples
|
||||
The primary use case for the Graph + Single Stat visualization is to show the current or latest
|
||||
value as well as historical values.
|
||||
|
||||
### Show current value and historical values
|
||||
The following example shows the current percentage of memory used as well as memory usage over time:
|
||||
|
||||
###### Query memory usage percentage
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "mem" and
|
||||
r._field == "used_percent"
|
||||
)
|
||||
```
|
||||
###### Memory allocations visualization
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-graph-single-stat-mem-8.png" alt="Graph + Single Stat Memory Usage Example" />}}
|
||||
|
|
|
@ -11,4 +11,64 @@ menu:
|
|||
parent: Visualization types
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The Graph visualization provides several types of graphs, each configured through
|
||||
the [Graph controls](#graph-controls).
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-example-8.png" alt="Line Graph example" />}}
|
||||
|
||||
Select the **Graph** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Graph behavior
|
||||
The Graph visualization color codes each table (or series) in the queried data set.
|
||||
When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
|
||||
|
||||
When using a line graph, all points within a single table are connected. When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
|
||||
|
||||
## Graph controls
|
||||
To view **Graph** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
###### Data
|
||||
- **X Column**: Select a column to display on the x-axis.
|
||||
- **Y Column**: Select a column to display on the y-axis.
|
||||
|
||||
###### Options
|
||||
- **Interpolation**: Select from the following options:
|
||||
- **Line**: Display a time series in a line graph
|
||||
- **Smooth**: Display a time series in a line graph with smooth point interpolation.
|
||||
- **Step**: Display a time series in a staircase graph.
|
||||
<!-- - **Bar**: Display the specified time series using a bar chart. -->
|
||||
<!-- - **Stacked**: Display multiple time series bars as segments stacked on top of each other. -->
|
||||
- **Line Colors**: Select a color scheme to use for your graph.
|
||||
- **Shade Area Below Lines**: Shade in the area below the graph lines.
|
||||
|
||||
###### Y Axis
|
||||
- **Y Axis Label**: Label for the y-axis.
|
||||
- **Y Tick Prefix**: Prefix to be added to y-value.
|
||||
- **Y Tick Suffix**: Suffix to be added to y-value.
|
||||
- **Y Axis Domain**: The y-axis value range.
|
||||
- **Auto**: Automatically determine the value range based on values in the data set.
|
||||
- **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both.
|
||||
- **Min**: Minimum y-axis value.
|
||||
- **Max**: Maximum y-axis value.
|
||||
- **Positioning**:
|
||||
- **Overlaid**: Display graph lines overlaid on each other.
|
||||
- **Stacked**: Display graph lines stacked on top of each other.
|
||||
|
||||
|
||||
## Graph Examples
|
||||
|
||||
##### Graph with linear interpolation
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-example-8.png" alt="Line Graph example" />}}
|
||||
|
||||
##### Graph with smooth interpolation
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-smooth-example-8.png" alt="Step-Plot Graph example" />}}
|
||||
|
||||
##### Graph with step interpolation
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-line-graph-step-example-8.png" alt="Step-Plot Graph example" />}}
|
||||
|
||||
<!-- ##### Stacked Graph example
|
||||
{{< img-hd src="/img/2-0-visualizations-stacked-graph-example.png" alt="Stacked Graph example" />}} -->
|
||||
|
||||
<!-- ##### Bar Graph example
|
||||
{{< img-hd src="/img/2-0-visualizations-bar-graph-example.png" alt="Bar Graph example" />}} -->
|
||||
|
|
|
@ -5,7 +5,7 @@ list_image: /img/influxdb/2-0-visualizations-heatmap-example.png
|
|||
description: >
|
||||
A Heatmap displays the distribution of data on an x and y axes where color
|
||||
represents different concentrations of data points.
|
||||
weight: 202
|
||||
weight: 203
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Heatmap
|
||||
|
@ -14,4 +14,102 @@ related:
|
|||
- /influxdb/cloud/visualize-data/visualization-types/scatter
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
A **Heatmap** displays the distribution of data on an x and y axes where color
|
||||
represents different concentrations of data points.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-example.png" alt="Heatmap example" />}}
|
||||
|
||||
Select the **Heatmap** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Heatmap behavior
|
||||
Heatmaps divide data points into "bins" – segments of the visualization with upper
|
||||
and lower bounds for both [X and Y axes](#data).
|
||||
The [Bin Size option](#options) determines the bounds for each bin.
|
||||
The total number of points that fall within a bin determines its value and color.
|
||||
Warmer or brighter colors represent higher bin values or density of points within the bin.
|
||||
|
||||
## Heatmap Controls
|
||||
To view **Heatmap** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
###### Data
|
||||
- **X Column**: Select a column to display on the x-axis.
|
||||
- **Y Column**: Select a column to display on the y-axis.
|
||||
|
||||
###### Options
|
||||
- **Color Scheme**: Select a color scheme to use for your heatmap.
|
||||
- **Bin Size**: Specify the size of each bin. Default is 10.
|
||||
|
||||
###### X Axis
|
||||
- **X Axis Label**: Label for the x-axis.
|
||||
- **X Tick Prefix**: Prefix to be added to x-value.
|
||||
- **X Tick Suffix**: Suffix to be added to x-value.
|
||||
- **X Axis Domain**: The x-axis value range.
|
||||
- **Auto**: Automatically determine the value range based on values in the data set.
|
||||
- **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both.
|
||||
- **Min**: Minimum x-axis value.
|
||||
- **Max**: Maximum x-axis value.
|
||||
|
||||
###### Y Axis
|
||||
- **Y Axis Label**: Label for the y-axis.
|
||||
- **Y Tick Prefix**: Prefix to be added to y-value.
|
||||
- **Y Tick Suffix**: Suffix to be added to y-value.
|
||||
- **Y Axis Domain**: The y-axis value range.
|
||||
- **Auto**: Automatically determine the value range based on values in the data set.
|
||||
- **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both.
|
||||
- **Min**: Minimum y-axis value.
|
||||
- **Max**: Maximum y-axis value.
|
||||
|
||||
## Heatmap examples
|
||||
|
||||
### Cross-measurement correlation
|
||||
The following example explores possible correlation between CPU and Memory usage.
|
||||
It uses data collected with the Telegraf [Mem](/{{< latest "telegraf" >}}/plugins//#mem)
|
||||
and [CPU](/{{< latest "telegraf" >}}/plugins//#cpu) input plugins.
|
||||
|
||||
###### Join CPU and memory usage
|
||||
The following query joins CPU and memory usage on `_time`.
|
||||
Each row in the output table contains `_value_cpu` and `_value_mem` columns.
|
||||
|
||||
```js
|
||||
cpu = from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "cpu" and
|
||||
r._field == "usage_system" and
|
||||
r.cpu == "cpu-total"
|
||||
)
|
||||
|
||||
mem = from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "mem" and
|
||||
r._field == "used_percent"
|
||||
)
|
||||
|
||||
join(tables: {cpu: cpu, mem: mem}, on: ["_time"], method: "inner")
|
||||
```
|
||||
|
||||
###### Use a heatmap to visualize correlation
|
||||
In the Heatmap visualization controls, `_value_cpu` is selected as the [X Column](#data)
|
||||
and `_value_mem` is selected as the [Y Column](#data).
|
||||
The domain for each axis is also customized to account for the scale difference
|
||||
between column values.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-correlation.png" alt="Heatmap correlation example" />}}
|
||||
|
||||
|
||||
## Important notes
|
||||
|
||||
### Differences between a heatmap and a scatter plot
|
||||
Heatmaps and [Scatter plots](/influxdb/cloud/visualize-data/visualization-types/scatter/)
|
||||
both visualize the distribution of data points on X and Y axes.
|
||||
However, in certain cases, heatmaps provide better visibility into point density.
|
||||
|
||||
For example, the dashboard cells below visualize the same query results:
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-vs-scatter.png" alt="Heatmap vs Scatter plot" />}}
|
||||
|
||||
The heatmap indicates isolated high point density, which isn't visible in the scatter plot.
|
||||
In the scatter plot visualization, points that share the same X and Y coordinates
|
||||
appear as a single point.
|
||||
|
|
|
@ -5,11 +5,76 @@ list_image: /img/influxdb/2-0-visualizations-histogram-example.png
|
|||
description: >
|
||||
A histogram is a way to view the distribution of data.
|
||||
The y-axis is dedicated to count, and the x-axis is divided into bins.
|
||||
weight: 202
|
||||
weight: 204
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Histogram
|
||||
parent: Visualization types
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
A histogram is a way to view the distribution of data.
|
||||
The y-axis is dedicated to count, and the X-axis is divided into bins.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-histogram-example.png" alt="Histogram example" />}}
|
||||
|
||||
Select the **Histogram** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Histogram behavior
|
||||
The Histogram visualization is a bar graph that displays the number of data points
|
||||
that fall within "bins" – segments of the X axis with upper and lower bounds.
|
||||
Bin thresholds are determined by dividing the width of the X axis by the number
|
||||
of bins set using the [Bins option](#options).
|
||||
Data within bins can be further grouped or segmented by selecting columns in the
|
||||
[Group By option](#options).
|
||||
|
||||
{{% note %}}
|
||||
The Histogram visualization automatically bins, segments, and counts data.
|
||||
To work properly, query results **should not** be structured as histogram data.
|
||||
{{% /note %}}
|
||||
|
||||
## Histogram Controls
|
||||
To view **Histogram** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
###### Data
|
||||
- **X Column**: The column to select data from.
|
||||
- **Group By**: The column to group by.
|
||||
|
||||
###### Options
|
||||
- **Color Scheme**: Select a color scheme to use for your graph.
|
||||
- **Positioning**: Select **Stacked** to stack groups in a bin on top of each other.
|
||||
Select **Overlaid** to overlay groups in each bin.
|
||||
- **Bins**: Enter a number of bins to divide data into or select Auto to automatically
|
||||
calculate the number of bins.
|
||||
- **Auto** or **Custom**: Enable or disable auto-setting.
|
||||
|
||||
###### X Axis
|
||||
- **X Axis Label**: Label for the x-axis.
|
||||
- **X Axis Domain**: The x-axis value range.
|
||||
- **Auto**: Automatically determine the value range based on values in the data set.
|
||||
- **Custom**: Manually specify the value range of the x-axis.
|
||||
- **Min**: Minimum x-axis value.
|
||||
- **Max**: Maximum x-axis value.
|
||||
|
||||
## Histogram examples
|
||||
|
||||
### View error counts by severity over time
|
||||
The following example uses the Histogram visualization to show the number of errors
|
||||
"binned" by time and segmented by severity.
|
||||
_It utilizes data from the [Telegraf Syslog plugin](/{{< latest "telegraf" >}}/plugins//#syslog)._
|
||||
|
||||
##### Query for errors by severity code
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "syslog" and
|
||||
r._field == "severity_code"
|
||||
)
|
||||
```
|
||||
|
||||
##### Histogram settings
|
||||
In the Histogram visualization options, select `_time` as the [X Column](#data)
|
||||
and `severity` as the [Group By](#data) option:
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-histogram-errors.png" alt="Errors histogram" />}}
|
||||
|
|
|
@ -5,11 +5,79 @@ list_image: /img/influxdb/2-0-visualizations-mosaic-example.png
|
|||
description: >
|
||||
The Mosaic visualization displays state changes in your time series data.
|
||||
This visualization type is useful when you want to show changes in string-based states over time.
|
||||
weight: 202
|
||||
weight: 206
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Mosaic
|
||||
parent: Visualization types
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The **Mosaic** visualization displays state changes in your time series data.
|
||||
This visualization type is useful when you want to show changes in string-based states over time.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-mosaic-example.png" alt="Mosaic data visualization" />}}
|
||||
|
||||
Select the **Mosaic** option from the visualization dropdown in the upper left.
|
||||
|
||||
## Mosaic behavior
|
||||
The mosaic visualization displays colored tiles based on string values in a specified column.
|
||||
Each unique string value is represented by a different color.
|
||||
|
||||
## Mosaic controls
|
||||
To view **Mosaic** controls, click **{{< icon "gear" >}} Customize** next to the visualization dropdown.
|
||||
|
||||
###### Data
|
||||
- **Fill Column**: Select a column to fill in the mosaic tiles.
|
||||
- **X Column**: Select a column to display on the x-axis.
|
||||
- **Y Column**: Select one or more columns to display on the y-axis.
|
||||
- **Time Format**: Select the time format. Options include:
|
||||
{{< ui/timestamp-formats >}}
|
||||
|
||||
###### Options
|
||||
- **Color Scheme**: Select a color scheme to use for your graph.
|
||||
|
||||
###### X Axis
|
||||
- **X Axis Label**: Enter a label for the x-axis.
|
||||
- **Generate X-Axis Tick Marks**: Select the method to generate x-axis tick marks:
|
||||
- **Auto**: Select to automatically generate tick marks.
|
||||
- **Custom**: To customize the number of x-axis tick marks, select this option, and then enter the following:
|
||||
- **Total Tick Marks**: Enter the total number of timestamp ticks to display.
|
||||
- **Start Tick Marks At**: Enter the time, in RFC3339 format, to start displaying ticks. Use the **Date Picker** field to automatically generate an RFC3339 formatted timestamp for this field.
|
||||
- **Tick Mark Interval**: Enter the number of milliseconds in between each timestamp tick.
|
||||
|
||||
###### Y Axis
|
||||
- **Y Axis Label**: Enter a label for the y-axis.
|
||||
- **Y Label Separator**: If there's more than one column on the y-axis, enter a delimiter to separate the label, such as a comma or space. If there's no separator specified, the labels are a continuous string of all y columns.
|
||||
- **Generate Y-Axis Tick Marks**: Select the method to generate y-axis tick marks:
|
||||
- **Auto**: Select to automatically generate tick marks.
|
||||
- **Custom**: To customize the number of y-axis tick marks, select this option, and then enter the following:
|
||||
- **Total Tick Marks**: Enter the total number of ticks to display.
|
||||
- **Start Tick Marks At**: Enter the value to start ticks at.
|
||||
- **Tick Mark Interval**: Enter the interval in between each tick.
|
||||
|
||||
###### Legend
|
||||
- **Legend Orientation**: Select the orientation of the legend that appears upon hover:
|
||||
- **Horizontal**: Select to display the legend horizontally.
|
||||
- **Vertical**: Select to display the legend vertically.
|
||||
- **Opacity**: Adjust the legend opacity using the slider.
|
||||
- **Colorize Rows**: Select to display legend rows in colors.
|
||||
|
||||
## Example query
|
||||
The following query uses the [Website Monitoring demo data](/influxdb/cloud/reference/sample-data/#influxdb-cloud-demo-data)
|
||||
to display changes in response times for monitored URLs.
|
||||
The query assigns a `responseTimeSummary` string value based on the response time range.
|
||||
Use `responseTimeSummary` as the **Fill Column** in the [visualization controls](#data).
|
||||
|
||||
```js
|
||||
from(bucket: "Website Monitoring Bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) => r._measurement == "http_response")
|
||||
|> filter(fn: (r) => r._field == "response_time")
|
||||
|> aggregateWindow(every: v.windowPeriod, fn: max, createEmpty: false)
|
||||
|> map(fn: (r) => ({
|
||||
r with responseTimeSummary:
|
||||
if r._value > 0.6 then "high"
|
||||
else if r._value > 0.4 then "medium"
|
||||
else "ok"
|
||||
}))
|
||||
```
|
||||
|
|
|
@ -4,7 +4,7 @@ list_title: Scatter
|
|||
list_image: /img/influxdb/2-0-visualizations-scatter-example.png
|
||||
description: >
|
||||
The Scatter view uses a scatter plot to display time series data.
|
||||
weight: 202
|
||||
weight: 208
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Scatter
|
||||
|
@ -13,4 +13,97 @@ related:
|
|||
- /influxdb/cloud/visualize-data/visualization-types/heatmap
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The **Scatter** view uses a scatter plot to display time series data.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-scatter-example.png" alt="Scatter plot example" />}}
|
||||
|
||||
Select the **Scatter** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Scatter behavior
|
||||
The scatter visualization maps each data point to X and Y coordinates.
|
||||
X and Y axes are specified with the [X Column](#data) and [Y Column](#data) visualization options.
|
||||
Each unique series is differentiated using fill colors and symbols.
|
||||
Use the [Symbol Column](#data) and [Fill Column](#data) options to select columns
|
||||
used to differentiate points in the visualization.
|
||||
|
||||
## Scatter controls
|
||||
To view **Scatter** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
###### Data
|
||||
- **Symbol Column**: Define a column containing values that should be differentiated with symbols.
|
||||
- **Fill Column**: Define a column containing values that should be differentiated with fill color.
|
||||
- **X Column**: Select a column to display on the x-axis.
|
||||
- **Y Column**: Select a column to display on the y-axis.
|
||||
|
||||
###### Options
|
||||
- **Color Scheme**: Select a color scheme to use for your scatter plot.
|
||||
|
||||
###### X Axis
|
||||
- **X Axis Label**: Label for the x-axis.
|
||||
|
||||
###### Y Axis
|
||||
- **Y Axis Label**: Label for the y-axis.
|
||||
- **Y Tick Prefix**: Prefix to be added to y-value.
|
||||
- **Y Tick Suffix**: Suffix to be added to y-value.
|
||||
- **Y Axis Domain**: The y-axis value range.
|
||||
- **Auto**: Automatically determine the value range based on values in the data set.
|
||||
- **Custom**: Manually specify the minimum y-axis value, maximum y-axis value, or range by including both.
|
||||
- **Min**: Minimum y-axis value.
|
||||
- **Max**: Maximum y-axis value.
|
||||
|
||||
## Scatter examples
|
||||
|
||||
### Cross-measurement correlation
|
||||
The following example explores possible correlation between CPU and Memory usage.
|
||||
It uses data collected with the Telegraf [Mem](/{{< latest "telegraf" >}}/plugins//#mem)
|
||||
and [CPU](/{{< latest "telegraf" >}}/plugins//#cpu) input plugins.
|
||||
|
||||
###### Query CPU and memory usage
|
||||
The following query creates a union of CPU and memory usage.
|
||||
It scales the CPU usage metric to better align with baseline memory usage.
|
||||
|
||||
```js
|
||||
cpu = from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "cpu" and
|
||||
r._field == "usage_system" and
|
||||
r.cpu == "cpu-total"
|
||||
)
|
||||
// Scale CPU usage
|
||||
|> map(fn: (r) => ({ r with
|
||||
_value: r._value + 60.0,
|
||||
_time: r._time
|
||||
})
|
||||
)
|
||||
|
||||
mem = from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "mem" and
|
||||
r._field == "used_percent"
|
||||
)
|
||||
|
||||
union(tables: [cpu, mem])
|
||||
```
|
||||
|
||||
###### Use a scatter plot to visualize correlation
|
||||
In the Scatter visualization controls, points are differentiated based on their group keys.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-scatter-correlation.png" alt="Heatmap correlation example" />}}
|
||||
|
||||
## Important notes
|
||||
|
||||
### Differences between a scatter plot and a heatmap
|
||||
Scatter plots and [Heatmaps](/influxdb/cloud/visualize-data/visualization-types/heatmap/)
|
||||
both visualize the distribution of data points on X and Y axes.
|
||||
However, in certain cases, scatterplots can "hide" points if they share the same X and Y coordinates.
|
||||
|
||||
For example, the dashboard cells below visualize the same query results:
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-heatmap-vs-scatter.png" alt="Heatmap vs Scatter plot" />}}
|
||||
|
||||
The heatmap indicates isolated high point density, which isn't visible in the scatter plot.
|
||||
In the scatter plot visualization, points that share the same X and Y coordinates
|
||||
appear as a single point.
|
||||
|
|
|
@ -4,11 +4,62 @@ list_title: Single stat
|
|||
list_image: /img/influxdb/2-0-visualizations-single-stat-example.png
|
||||
description: >
|
||||
The Single Stat view displays the most recent value of the specified time series as a numerical value.
|
||||
weight: 202
|
||||
weight: 205
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Single Stat
|
||||
parent: Visualization types
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The **Single Stat** view displays the most recent value of the specified time series as a numerical value.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-single-stat-example-8.png" alt="Single stat example" />}}
|
||||
|
||||
Select the **Single Stat** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Single Stat behavior
|
||||
The Single Stat visualization displays a single numeric data point.
|
||||
It uses the latest point in the first table (or series) returned by the query.
|
||||
|
||||
{{% note %}}
|
||||
#### Queries should return one table
|
||||
Flux does not guarantee the order in which tables are returned.
|
||||
If a query returns multiple tables (or series), the table order can change between query executions
|
||||
and result in the Single Stat visualization displaying inconsistent data.
|
||||
For consistent results, the Single Stat query should return a single table.
|
||||
{{% /note %}}
|
||||
|
||||
## Single Stat Controls
|
||||
To view **Single Stat** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
- **Prefix**: Prefix to be added to the single stat.
|
||||
- **Suffix**: Suffix to be added to the single stat.
|
||||
- **Decimal Places**: The number of decimal places to display for the single stat.
|
||||
- **Auto** or **Custom**: Enable or disable auto-setting.
|
||||
|
||||
###### Colorized Thresholds
|
||||
- **Base Color**: Select a base or background color from the selection list.
|
||||
- **Add a Threshold**: Change the color of the single stat based on the current value.
|
||||
- **Value is**: Enter the value at which the single stat should appear in the selected color.
|
||||
Choose a color from the dropdown menu next to the value.
|
||||
- **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds.
|
||||
Choose **Background** for the background of the graph to change color based on the configured thresholds.
|
||||
|
||||
## Single Stat examples
|
||||
|
||||
### Show human-readable current value
|
||||
The following example shows the current memory usage displayed has a human-readable percentage:
|
||||
|
||||
###### Query memory usage percentage
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "mem" and
|
||||
r._field == "used_percent"
|
||||
)
|
||||
```
|
||||
|
||||
###### Memory usage as a single stat
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-single-stat-example-8.png" alt="Graph + Single Stat Memory Usage Example" />}}
|
||||
|
|
|
@ -5,11 +5,73 @@ list_image: /img/influxdb/2-0-visualizations-table-example.png
|
|||
description: >
|
||||
The Table option displays the results of queries in a tabular view, which is
|
||||
sometimes easier to analyze than graph views of data.
|
||||
weight: 202
|
||||
weight: 207
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Table
|
||||
parent: Visualization types
|
||||
---
|
||||
|
||||
{{< duplicate-oss >}}
|
||||
The **Table** option displays the results of queries in a tabular view, which is
|
||||
sometimes easier to analyze than graph views of data.
|
||||
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-table-example.png" alt="Table example" />}}
|
||||
|
||||
Select the **Table** option from the visualization dropdown in the upper right.
|
||||
|
||||
## Table behavior
|
||||
The table visualization renders queried data in structured, easy-to-read tables.
|
||||
Columns and rows match those in the query output.
|
||||
If query results contain multiple tables, only one table is shown at a time.
|
||||
Select other output tables in the far left column of the table visualization.
|
||||
Tables are identified by their [group key](/influxdb/cloud/query-data/get-started/#group-keys).
|
||||
|
||||
## Table Controls
|
||||
To view **Table** controls, click **{{< icon "gear" >}} Customize** next to
|
||||
the visualization dropdown.
|
||||
|
||||
- **Default Sort Field**: Select the default sort field. Default is **time**.
|
||||
- **Time Format**: Select the time format. Options include:
|
||||
{{< ui/timestamp-formats >}}
|
||||
|
||||
- **Decimal Places**: Enter the number of decimal places. Default (empty field) is **unlimited**.
|
||||
- **Auto** or **Custom**: Enable or disable auto-setting.
|
||||
|
||||
###### Column Settings
|
||||
- **First Column**: Toggle to **Fixed** to lock the first column so that the listings are always visible.
|
||||
Threshold settings do not apply in the first column when locked.
|
||||
- **Table Columns**:
|
||||
- Enter a new name to rename any of the columns.
|
||||
- Click the eye icon next to a column to hide it.
|
||||
- [additional]: Enter name for each additional column.
|
||||
- Change the order of the columns by dragging to the desired position.
|
||||
|
||||
###### Colorized Thresholds
|
||||
- **Base Color**: Select a base or background color from the selection list.
|
||||
- **Add a Threshold**: Change the color of the table based on the current value.
|
||||
- **Value is**: Enter the value at which the table should appear in the selected color.
|
||||
Choose a color from the dropdown menu next to the value.
|
||||
|
||||
## Table examples
|
||||
Tables are helpful when displaying many human-readable metrics in a dashboard
|
||||
such as cluster statistics or log messages.
|
||||
|
||||
### Human-readable cluster metrics
|
||||
The following example queries the latest reported memory usage from a cluster of servers.
|
||||
|
||||
###### Query the latest memory usage from each host
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|
||||
|> filter(fn: (r) =>
|
||||
r._measurement == "mem" and
|
||||
r._field == "used_percent"
|
||||
)
|
||||
|> group(columns: ["host"])
|
||||
|> last()
|
||||
|> group()
|
||||
|> keep(columns: ["_value", "host"])
|
||||
```
|
||||
|
||||
###### Cluster metrics in a table
|
||||
{{< img-hd src="/img/influxdb/2-0-visualizations-table-human-readable.png" alt="Human readable metrics in a table" />}}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: Bulk ingest
|
||||
seotitle: Write bulk data to InfluxDB Cloud
|
||||
list_title: Write bulk data to InfluxDB Cloud
|
||||
seotitle: Write bulk data to InfluxDB Cloud.
|
||||
list_title: Write bulk data to InfluxDB Cloud.
|
||||
weight: 105
|
||||
description: >
|
||||
Write existing data to InfluxDB Cloud in bulk.
|
||||
|
@ -14,10 +14,7 @@ alias:
|
|||
- /influxdb/v2.0/write-data/bulk-ingest-cloud
|
||||
---
|
||||
|
||||
To upload a large amount of previously existing *historical* data into InfluxDB Cloud,
|
||||
[contact InfluxData Support](mailto:support@influxdata.com) for assistance.
|
||||
We’ll review your ingest rate limits, volume, and existing
|
||||
[data schema](/influxdb/cloud/reference/key-concepts/data-schema) to ensure the
|
||||
most efficient migration.
|
||||
To upload a large amount of previously existing *historical* data into InfluxDB Cloud, contact Support for assistance.
|
||||
We’ll review your ingest rate limits, volume, and existing [data schema](/influxdb/cloud/reference/key-concepts/data-schema) to ensure the most efficient migration.
|
||||
|
||||
Given our usage-based pricing and because the API is optimized for batched writing, we do not recommend using the API to ingest bulk data.
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
title: Write data with developer tools
|
||||
title: Developer tools
|
||||
seotitle: Write data to InfluxDB with developer tools
|
||||
list_title: Use developer tools
|
||||
weight: 102
|
||||
description: >
|
||||
Use developer tools such as the InfluxDB API and `influx` CLI to write data to InfluxDB.
|
||||
Write data to InfluxDB with developer tools.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Developer tools
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
---
|
||||
title: Write data with the InfluxDB API
|
||||
title: InfluxDB API
|
||||
seotitle: Write data with the InfluxDB API
|
||||
list_title: Write data with the InfluxDB API
|
||||
weight: 206
|
||||
description: >
|
||||
Use the `/write` endpoint of the InfluxDB API to write data to InfluxDB.
|
||||
Write data to InfluxDB using the InfluxDB API.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: InfluxDB API
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
---
|
||||
title: Write data with client libraries
|
||||
title: Client libraries
|
||||
seotitle: Write data with client libraries
|
||||
list_title: Write data with client libraries
|
||||
weight: 204
|
||||
description: >
|
||||
Use client libraries to write data to InfluxDB.
|
||||
Write data to InfluxDB using client libraries.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Client libraries
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
---
|
||||
title: Write data with the influx CLI
|
||||
title: Influx CLI
|
||||
seotitle: Write data with the influx CLI
|
||||
list_title: Write data with the influx CLI
|
||||
weight: 205
|
||||
description: >
|
||||
Use the `influx write` command to write data to InfluxDB from the command line.
|
||||
Write data to InfluxDB using the `influx` CLI.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Influx CLI
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
---
|
||||
title: Write data with third-party technologies
|
||||
title: Third-party technologies
|
||||
seotitle: Write data with third-party technologies
|
||||
list_title: Write data with third-party technologies
|
||||
weight: 103
|
||||
description: >
|
||||
Write data to InfluxDB using third-party developer tools.
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
---
|
||||
title: Write data to InfluxDB without coding
|
||||
title: No-code solutions
|
||||
seotitle: Write data to InfluxDB without coding
|
||||
list_title: Write data to InfluxDB without coding
|
||||
weight: 101
|
||||
description: >
|
||||
Use existing tools to write data to InfluxDB without writing code.
|
||||
Write data to InfluxDB without writing code.
|
||||
aliases:
|
||||
- /influxdb/cloud/collect-data/advanced-telegraf
|
||||
- /influxdb/cloud/collect-data/use-telegraf
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
---
|
||||
title: Write data with no-code third-party technologies
|
||||
title: Third-party technologies
|
||||
seotitle: Write data with third-party technologies
|
||||
list_title: Write data with third-party technologies
|
||||
weight: 103
|
||||
description: >
|
||||
Write data to InfluxDB using third-party technologies that do not require coding.
|
||||
Write data to InfluxDB using third-party technologies.
|
||||
menu:
|
||||
influxdb_cloud:
|
||||
name: Third-party technologies
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
title: Use Telegraf to write data
|
||||
title: Telegraf (agent)
|
||||
seotitle: Use the Telegraf agent to collect and write data
|
||||
list_title: Use the Telegraf agent
|
||||
weight: 101
|
||||
|
|
|
@ -12,7 +12,7 @@ canonical: /influxdb/v2.0/query-data/optimize-queries/
|
|||
|
||||
Optimize your Flux queries to reduce their memory and compute (CPU) requirements.
|
||||
|
||||
- [Start queries with pushdown functions](#start-queries-with-pushdowns)
|
||||
- [Start queries with pushdown functions](#start-queries-with-pushdown-functions)
|
||||
- [Avoid short window durations](#avoid-short-window-durations)
|
||||
- [Use "heavy" functions sparingly](#use-heavy-functions-sparingly)
|
||||
- [Balance time range and data precision](#balance-time-range-and-data-precision)
|
||||
|
|
|
@ -12,7 +12,7 @@ canonical: /influxdb/v2.0/query-data/optimize-queries/
|
|||
|
||||
Optimize your Flux queries to reduce their memory and compute (CPU) requirements.
|
||||
|
||||
- [Start queries with pushdown functions](#start-queries-with-pushdowns)
|
||||
- [Start queries with pushdown functions](#start-queries-with-pushdown-functions)
|
||||
- [Avoid short window durations](#avoid-short-window-durations)
|
||||
- [Use "heavy" functions sparingly](#use-heavy-functions-sparingly)
|
||||
- [Balance time range and data precision](#balance-time-range-and-data-precision)
|
||||
|
|
|
@ -184,7 +184,7 @@ You can always run the desired version by specifying the full path:
|
|||
|
||||
```sh
|
||||
$ /usr/local/opt/influxdb/bin/influxd version
|
||||
InfluxDB 2.0.5 (git: none) build_date: 2021-04-01T17:55:08Z
|
||||
InfluxDB 2.0.4 (git: none) build_date: 2021-04-01T17:55:08Z
|
||||
$ /usr/local/opt/influxdb@1/bin/influxd version
|
||||
InfluxDB v1.8.4 (git: unknown unknown)
|
||||
```
|
||||
|
|
|
@ -14,121 +14,32 @@ related:
|
|||
products: [oss]
|
||||
---
|
||||
|
||||
{{% cloud %}}
|
||||
Restores **not supported in {{< cloud-name "short" >}}**.
|
||||
{{% /cloud %}}
|
||||
|
||||
Use the `influx restore` command to restore backup data and metadata from InfluxDB OSS.
|
||||
|
||||
- [Restore data with the influx CLI](#restore-data-with-the-influx-cli)
|
||||
- [Recover from a failed restore](#recover-from-a-failed-restore)
|
||||
{{% cloud %}}
|
||||
The `influx restore` command only restores data to InfluxDB OSS, **not {{< cloud-name "short" >}}**.
|
||||
{{% /cloud %}}
|
||||
|
||||
InfluxDB moves existing data and metadata to a temporary location.
|
||||
If the restore fails, InfluxDB preserves temporary data for recovery,
|
||||
otherwise this data is deleted.
|
||||
When restoring data from a backup file set, InfluxDB temporarily moves existing
|
||||
data and metadata while the restore process runs.
|
||||
Once the process completes, the temporary data is deleted.
|
||||
If the restore process fails, InfluxDB preserves the data in the temporary location.
|
||||
_See [Recover from a failed restore](#recover-from-a-failed-restore)._
|
||||
|
||||
{{% note %}}
|
||||
#### Cannot restore to existing buckets
|
||||
The `influx restore` command cannot restore data to existing buckets.
|
||||
Use the `--new-bucket` flag to create a new bucket to restore data to.
|
||||
To restore data and retain bucket names, [delete existing buckets](/influxdb/v2.0/organizations/buckets/delete-bucket/)
|
||||
and then begin the restore process.
|
||||
{{% /note %}}
|
||||
|
||||
## Restore data with the influx CLI
|
||||
Use the `influx restore` command and specify the path to the backup directory.
|
||||
Use the `influx restore` command and specify the path to the backup directory in the first argument.
|
||||
|
||||
```sh
|
||||
# Syntax
|
||||
influx restore <path-to-backup-directory>
|
||||
|
||||
# Example
|
||||
influx restore ~/backups/2020-01-20_12-00/
|
||||
```
|
||||
|
||||
_For more information about restore options and flags, see the
|
||||
[`influx restore` documentation](/influxdb/v2.0/reference/cli/influx/restore/)._
|
||||
|
||||
- [Restore all time series data](#restore-all-time-series-data)
|
||||
- [Restore data from a specific bucket](#restore-data-from-a-specific-bucket)
|
||||
- [Restore and replace all InfluxDB data](#restore-and-replace-all-influxdb-data)
|
||||
|
||||
### Restore all time series data
|
||||
To restore all time series data from a backup directory, provide the following:
|
||||
|
||||
- backup directory path
|
||||
|
||||
```sh
|
||||
influx restore \
|
||||
--input /backups/2020-01-20_12-00/
|
||||
```
|
||||
|
||||
### Restore data from a specific bucket
|
||||
To restore data from a specific backup bucket, provide the following:
|
||||
|
||||
- backup directory path
|
||||
- bucket name or ID
|
||||
|
||||
```sh
|
||||
influx restore \
|
||||
--input /backups/2020-01-20_12-00/ \
|
||||
--bucket example-bucket
|
||||
|
||||
# OR
|
||||
|
||||
influx restore \
|
||||
--input /backups/2020-01-20_12-00/ \
|
||||
--bucket-id 000000000000
|
||||
```
|
||||
|
||||
If a bucket with the same name as the backed up bucket already exists in InfluxDB,
|
||||
use the `--new-bucket` flag to create a new bucket with a different name and
|
||||
restore data into it.
|
||||
|
||||
```sh
|
||||
influx restore \
|
||||
--input /backups/2020-01-20_12-00/ \
|
||||
--bucket example-bucket \
|
||||
--new-bucket new-example-bucket
|
||||
```
|
||||
|
||||
### Restore and replace all InfluxDB data
|
||||
To restore and replace all time series data _and_ InfluxDB key-value data such as
|
||||
tokens, users, dashboards, etc., include the following:
|
||||
|
||||
- `--full` flag
|
||||
- backup directory path
|
||||
|
||||
```sh
|
||||
influx restore \
|
||||
--input /backups/2020-01-20_12-00/ \
|
||||
--full
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
#### Restore to a new InfluxDB server
|
||||
If using a backup to populate a new InfluxDB server:
|
||||
|
||||
1. Retrieve the [admin token](/influxdb/v2.0/security/tokens/#admin-token) from your source InfluxDB instance.
|
||||
2. Set up your new InfluxDB instance, but use the `-t`, `--token` flag to use the
|
||||
**admin token** from your source instance as the admin token on your new instance.
|
||||
|
||||
```sh
|
||||
influx setup --token My5uP3rSecR37t0keN
|
||||
```
|
||||
3. Restore the backup to the new server.
|
||||
|
||||
```sh
|
||||
influx restore \
|
||||
--input /backups/2020-01-20_12-00/ \
|
||||
--full
|
||||
```
|
||||
|
||||
If you do not provide the admin token from your source InfluxDB instance as the
|
||||
admin token in your new instance, the restore process and all subsequent attempts
|
||||
to authenticate with the new server will fail.
|
||||
|
||||
1. The first restore API call uses the auto-generated token to authenticate with
|
||||
the new server and overwrites the entire key-value store in the new server, including
|
||||
the auto-generated token.
|
||||
2. The second restore API call attempts to upload time series data, but uses the
|
||||
auto-generated token to authenticate with new server.
|
||||
That token is overwritten in first restore API call and the process fails to authenticate.
|
||||
{{% /note %}}
|
||||
|
||||
|
||||
## Recover from a failed restore
|
||||
If the restoration process fails, InfluxDB preserves existing data in a `tmp`
|
||||
|
|
|
@ -22,7 +22,6 @@ _See [Differences between InfluxDB Cloud and InfluxDB OSS](#differences-between-
|
|||
{{% tabs %}}
|
||||
[macOS](#)
|
||||
[Linux](#)
|
||||
[Windows](#)
|
||||
[Docker](#)
|
||||
[Kubernetes](#)
|
||||
{{% /tabs %}}
|
||||
|
@ -47,7 +46,7 @@ brew install influxdb
|
|||
|
||||
You can also download the InfluxDB v2.0 binaries for macOS directly:
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.5-darwin-amd64.tar.gz" download>InfluxDB v2.0 (macOS)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.4-darwin-amd64.tar.gz" download>InfluxDB v2.0 (macOS)</a>
|
||||
|
||||
##### (Optional) Verify the authenticity of downloaded binary
|
||||
|
||||
|
@ -65,13 +64,13 @@ If `gpg` is not available, see the [GnuPG homepage](https://gnupg.org/download/)
|
|||
For example:
|
||||
|
||||
```
|
||||
wget https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.5-darwin-amd64.tar.gz.asc
|
||||
wget https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.4-darwin-amd64.tar.gz.asc
|
||||
```
|
||||
|
||||
3. Verify the signature with `gpg --verify`:
|
||||
|
||||
```
|
||||
gpg --verify influxdb2-2.0.5-darwin-amd64.tar.gz.asc influxdb2-2.0.5-darwin-amd64.tar.gz
|
||||
gpg --verify influxdb2-2.0.4-darwin-amd64.tar.gz.asc influxdb2-2.0.4-darwin-amd64.tar.gz
|
||||
```
|
||||
|
||||
The output from this command should include the following:
|
||||
|
@ -88,7 +87,7 @@ or run the following command in a macOS command prompt application such
|
|||
|
||||
```sh
|
||||
# Unpackage contents to the current working directory
|
||||
tar zxvf ~/Downloads/influxdb2-2.0.5-darwin-amd64.tar.gz
|
||||
tar zxvf ~/Downloads/influxdb2-2.0.4-darwin-amd64.tar.gz
|
||||
```
|
||||
|
||||
##### (Optional) Place the binaries in your $PATH
|
||||
|
@ -98,7 +97,7 @@ prefix the executables with `./` to run then in place.
|
|||
|
||||
```sh
|
||||
# (Optional) Copy the influx and influxd binary to your $PATH
|
||||
sudo cp influxdb2-2.0.5-darwin-amd64/{influx,influxd} /usr/local/bin/
|
||||
sudo cp influxdb2-2.0.4-darwin-amd64/{influx,influxd} /usr/local/bin/
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
|
@ -167,8 +166,8 @@ influxd --reporting-disabled
|
|||
|
||||
Download InfluxDB v2.0 for Linux.
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.5-linux-amd64.tar.gz" download >InfluxDB v2.0 (amd64)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.5-linux-arm64.tar.gz" download >InfluxDB v2.0 (arm)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.4-linux-amd64.tar.gz" download >InfluxDB v2.0 (amd64)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.4-linux-arm64.tar.gz" download >InfluxDB v2.0 (arm)</a>
|
||||
|
||||
### (Optional) Verify the authenticity of downloaded binary
|
||||
|
||||
|
@ -186,13 +185,13 @@ If `gpg` is not available, see the [GnuPG homepage](https://gnupg.org/download/)
|
|||
For example:
|
||||
|
||||
```
|
||||
wget https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.5-linux-amd64.tar.gz.asc
|
||||
wget https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.4-linux-amd64.tar.gz.asc
|
||||
```
|
||||
|
||||
3. Verify the signature with `gpg --verify`:
|
||||
|
||||
```
|
||||
gpg --verify influxdb2-2.0.5-linux-amd64.tar.gz.asc influxdb2-2.0.5-linux-amd64.tar.gz
|
||||
gpg --verify influxdb2-2.0.4-linux-amd64.tar.gz.asc influxdb2-2.0.4-linux-amd64.tar.gz
|
||||
```
|
||||
|
||||
The output from this command should include the following:
|
||||
|
@ -209,10 +208,10 @@ _**Note:** The following commands are examples. Adjust the file names, paths, an
|
|||
|
||||
```sh
|
||||
# Unpackage contents to the current working directory
|
||||
tar xvzf path/to/influxdb2-2.0.5-linux-amd64.tar.gz
|
||||
tar xvzf path/to/influxdb2-2.0.4-linux-amd64.tar.gz
|
||||
|
||||
# Copy the influx and influxd binary to your $PATH
|
||||
sudo cp influxdb2-2.0.5-linux-amd64/{influx,influxd} /usr/local/bin/
|
||||
sudo cp influxdb2-2.0.4-linux-amd64/{influx,influxd} /usr/local/bin/
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
|
@ -317,80 +316,6 @@ influxd --reporting-disabled
|
|||
{{% /tab-content %}}
|
||||
<!--------------------------------- END Linux --------------------------------->
|
||||
|
||||
<!------------------------------- BEGIN Windows ------------------------------->
|
||||
{{% tab-content %}}
|
||||
{{% warn %}}
|
||||
### Experimental Windows support
|
||||
InfluxDB 2.0 on Windows is currently considered experimental.
|
||||
If you experience issues, please [submit an issue](https://github.com/influxdata/influxdb/issues/new/choose)
|
||||
to the InfluxDB Github repository.
|
||||
|
||||
##### System requirements
|
||||
- Windows 10
|
||||
- 64-bit AMD architecture
|
||||
- [Powershell](https://docs.microsoft.com/powershell/) or
|
||||
[Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/)
|
||||
|
||||
##### Command line examples
|
||||
Use **Powershell** or **WSL** to execute `influx` and `influxd` commands.
|
||||
The command line examples in this documentation use `influx` and `influxd` as if
|
||||
installed on the system `PATH`.
|
||||
If these binaries are not installed on your `PATH`, replace `influx` and `influxd`
|
||||
in the provided examples with `./influx` and `./influxd` respectively.
|
||||
{{% /warn %}}
|
||||
|
||||
### Download and install InfluxDB v2.0
|
||||
Download InfluxDB v2.0 for Windows.
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.5-windows-amd64.tar.gz" download >InfluxDB v2.0 (Windows)</a>
|
||||
|
||||
Expand the downloaded archive into `C:\Program Files\InfluxData\influxdb`.
|
||||
|
||||
### Networking ports
|
||||
By default, InfluxDB uses TCP port `8086` for client-server communication over
|
||||
the [InfluxDB HTTP API](/influxdb/v2.0/reference/api/).
|
||||
|
||||
### Start InfluxDB
|
||||
In **Powershell**, navigate into `C:\Program Files\InfluxData\influxdb` and start
|
||||
InfluxDB by running the `influxd` daemon:
|
||||
|
||||
```powershell
|
||||
> cd -Path C:\Program Files\InfluxData\influxdb
|
||||
> ./influxd
|
||||
```
|
||||
|
||||
_See the [`influxd` documentation](/influxdb/v2.0/reference/cli/influxd) for information about
|
||||
available flags and options._
|
||||
|
||||
{{% note %}}
|
||||
#### Grant network access
|
||||
When starting InfluxDB for the first time, **Windows Defender** will appear with
|
||||
the following message:
|
||||
|
||||
> Windows Defender Firewall has blocked some features of this app.
|
||||
|
||||
1. Select **Private networks, such as my home or work network**.
|
||||
2. Click **Allow access**.
|
||||
{{% /note %}}
|
||||
|
||||
{{% note %}}
|
||||
#### InfluxDB "phone home"
|
||||
|
||||
By default, InfluxDB sends telemetry data back to InfluxData.
|
||||
The [InfluxData telemetry](https://www.influxdata.com/telemetry) page provides
|
||||
information about what data is collected and how it is used.
|
||||
|
||||
To opt-out of sending telemetry data back to InfluxData, include the
|
||||
`--reporting-disabled` flag when starting `influxd`.
|
||||
|
||||
```bash
|
||||
./influxd --reporting-disabled
|
||||
```
|
||||
{{% /note %}}
|
||||
|
||||
{{% /tab-content %}}
|
||||
<!-------------------------------- END Windows -------------------------------->
|
||||
|
||||
<!-------------------------------- BEGIN Docker ------------------------------->
|
||||
{{% tab-content %}}
|
||||
### Download and run InfluxDB v2.0
|
||||
|
@ -400,7 +325,7 @@ Expose port `8086`, which InfluxDB uses for client-server communication over
|
|||
the [InfluxDB HTTP API](/influxdb/v2.0/reference/api/).
|
||||
|
||||
```sh
|
||||
docker run --name influxdb -p 8086:8086 influxdb:2.0.5
|
||||
docker run --name influxdb -p 8086:8086 influxdb:2.0.4
|
||||
```
|
||||
_To run InfluxDB in [detached mode](https://docs.docker.com/engine/reference/run/#detached-vs-foreground), include the `-d` flag in the `docker run` command._
|
||||
|
||||
|
@ -420,7 +345,7 @@ _To run InfluxDB in [detached mode](https://docs.docker.com/engine/reference/run
|
|||
--name influxdb \
|
||||
-p 8086:8086 \
|
||||
--volume $PWD:/root/.influxdbv2 \
|
||||
influxdb:2.0.5
|
||||
influxdb:2.0.4
|
||||
```
|
||||
|
||||
### Configure InfluxDB with Docker
|
||||
|
@ -433,7 +358,7 @@ To mount an InfluxDB configuration file and use it from within Docker:
|
|||
|
||||
```sh
|
||||
docker run \
|
||||
--rm influxdb:2.0.5 \
|
||||
--rm influxdb:2.0.4 \
|
||||
influxd print-config > config.yml
|
||||
```
|
||||
|
||||
|
@ -444,7 +369,7 @@ To mount an InfluxDB configuration file and use it from within Docker:
|
|||
```sh
|
||||
docker run -p 8086:8086 \
|
||||
-v $PWD/config.yml:/etc/influxdb2/config.yml \
|
||||
influxdb:2.0.5
|
||||
influxdb:2.0.4
|
||||
```
|
||||
|
||||
(Find more about configuring InfluxDB [here](https://docs.influxdata.com/influxdb/v2.0/reference/config-options/).)
|
||||
|
@ -468,7 +393,7 @@ To opt-out of sending telemetry data back to InfluxData, include the
|
|||
`--reporting-disabled` flag when starting the InfluxDB container.
|
||||
|
||||
```sh
|
||||
docker run -p 8086:8086 influxdb:2.0.5 --reporting-disabled
|
||||
docker run -p 8086:8086 influxdb:2.0.4 --reporting-disabled
|
||||
```
|
||||
{{% /note %}}
|
||||
|
||||
|
@ -542,11 +467,11 @@ The setup process is available in both the InfluxDB user interface (UI) and in
|
|||
the `influx` command line interface (CLI).
|
||||
|
||||
{{% note %}}
|
||||
#### Operator token permissions
|
||||
The **Operator token** created in the InfluxDB setup process has
|
||||
#### Admin token permissions
|
||||
The **Admin token** created in the InfluxDB setup process has
|
||||
**full read and write access to all organizations** in the database.
|
||||
To prevent accidental interactions across organizations, we recommend
|
||||
[creating an All-Access token](/influxdb/v2.0/security/tokens/create-token/)
|
||||
[creating an All Access token](/influxdb/v2.0/security/tokens/create-token/)
|
||||
for each organization and using those to manage InfluxDB.
|
||||
{{% /note %}}
|
||||
|
||||
|
@ -581,13 +506,13 @@ If you set up InfluxDB through the UI and want to use the [`influx` CLI](/influx
|
|||
1. In a terminal, run the following command:
|
||||
|
||||
```sh
|
||||
# Set up a configuration profile
|
||||
influx config create -n default \
|
||||
-u http://localhost:8086 \
|
||||
-o example-org \
|
||||
-t mySuP3rS3cr3tT0keN \
|
||||
-a
|
||||
```
|
||||
# Set up a configuration profile
|
||||
influx config create -n default \
|
||||
-u http://localhost:8086 \
|
||||
-o example-org \
|
||||
-t mySuP3rS3cr3tT0keN \
|
||||
-a
|
||||
```
|
||||
This configures a new profile named `default` and makes the profile active so your `influx` CLI commands run against this instance. For more detail, see [influx config](/influxdb/v2.0/reference/cli/influx/config/).
|
||||
|
||||
2. Learn `influx` CLI commands. To see all available `influx` commands, type `influx -h` or check out [influx - InfluxDB command line interface](/influxdb/v2.0/reference/cli/influx/).
|
||||
|
|
|
@ -55,7 +55,7 @@ To view a summary of what's included in a template before applying the template,
|
|||
use the [`influx template` command](/influxdb/v2.0/reference/cli/influx/template/).
|
||||
View a summary of a template stored in your local filesystem or from a URL.
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs-wrapper %}}
|
||||
{{% code-tabs %}}
|
||||
[From a file](#)
|
||||
[From a URL](#)
|
||||
|
@ -78,14 +78,14 @@ influx template -u <template-url>
|
|||
influx template -u https://raw.githubusercontent.com/influxdata/community-templates/master/linux_system/linux_system.yml
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{% /code-tabs-wrapper %}}
|
||||
|
||||
## Validate a template
|
||||
To validate a template before you install it or troubleshoot a template, use
|
||||
the [`influx template validate` command](/influxdb/v2.0/reference/cli/influx/template/validate/).
|
||||
Validate a template stored in your local filesystem or from a URL.
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs-wrapper %}}
|
||||
{{% code-tabs %}}
|
||||
[From a file](#)
|
||||
[From a URL](#)
|
||||
|
@ -108,7 +108,7 @@ influx template validate -u <template-url>
|
|||
influx template validate -u https://raw.githubusercontent.com/influxdata/community-templates/master/linux_system/linux_system.yml
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{% /code-tabs-wrapper %}}
|
||||
|
||||
## Apply templates
|
||||
Use the [`influx apply` command](/influxdb/v2.0/reference/cli/influx/apply/) to install templates
|
||||
|
|
|
@ -28,14 +28,10 @@ To send notifications about changes in your data, start by creating a notificati
|
|||
|
||||
- For HTTP, enter the **URL** to send the notification. Select the **auth method** to use: **None** for no authentication. To authenticate with a username and password, select **Basic** and then enter credentials in the **Username** and **Password** fields. To authenticate with a token, select **Bearer**, and then enter the authentication token in the **Token** field.
|
||||
|
||||
- **For Slack**, create an [Incoming WebHook](https://api.slack.com/incoming-webhooks#posting_with_webhooks) in Slack, and then enter your webHook URL in the **Slack Incoming WebHook URL** field.
|
||||
- For Slack, create an [Incoming WebHook](https://api.slack.com/incoming-webhooks#posting_with_webhooks) in Slack, and then enter your webHook URL in the **Slack Incoming WebHook URL** field.
|
||||
|
||||
- **For PagerDuty**:
|
||||
- For PagerDuty:
|
||||
- [Create a new service](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-new-service), [add an integration for your service](https://support.pagerduty.com/docs/services-and-integrations#section-add-integrations-to-an-existing-service), and then enter the PagerDuty integration key for your new service in the **Routing Key** field.
|
||||
- The **Client URL** provides a useful link in your PagerDuty notification. Enter any URL that you'd like to use to investigate issues. This URL is sent as the `client_url` property in the PagerDuty trigger event. By default, the **Client URL** is set to your Monitoring & Alerting History page, and the following included in the PagerDuty trigger event:
|
||||
|
||||
```json
|
||||
"client_url": "http://localhost:8086/orgs/<your-org-ID>/alert-history"
|
||||
```
|
||||
- The **Client URL** provides a useful link in your PagerDuty notification. Enter any URL that you'd like to use to investigate issues. This URL is sent as the `client_url` property in the PagerDuty trigger event. By default, the **Client URL** is set to your Monitoring & Alerting History page, and the following included in the PagerDuty trigger event: `"client_url": "https://us-west-2-1.aws.cloud2.influxdata.net/orgs/<your-org-ID>/alert-history”`
|
||||
|
||||
6. Click **Create Notification Endpoint**.
|
||||
|
|
|
@ -11,8 +11,7 @@ influxdb/v2.0/tags: [buckets]
|
|||
---
|
||||
|
||||
A **bucket** is a named location where time series data is stored.
|
||||
All buckets have a **retention period**, a duration of time that each data point persists.
|
||||
InfluxDB drops all points with timestamps older than the bucket's retention period.
|
||||
All buckets have a **retention policy**, a duration of time that each data point persists.
|
||||
A bucket belongs to an organization.
|
||||
|
||||
The following articles provide information about managing buckets:
|
||||
|
|
|
@ -33,7 +33,7 @@ There are two places you can create a bucket in the UI.
|
|||
3. Enter a **Name** for the bucket.
|
||||
4. Select when to **Delete Data**:
|
||||
- **Never** to retain data forever.
|
||||
- **Older than** to choose a specific retention period.
|
||||
- **Older than** to choose a specific retention policy.
|
||||
5. Click **Create** to create the bucket.
|
||||
|
||||
### Create a bucket in the Data Explorer
|
||||
|
@ -46,7 +46,7 @@ There are two places you can create a bucket in the UI.
|
|||
3. Enter a **Name** for the bucket.
|
||||
4. Select when to **Delete Data**:
|
||||
- **Never** to retain data forever.
|
||||
- **Older than** to choose a specific retention period.
|
||||
- **Older than** to choose a specific retention policy.
|
||||
5. Click **Create** to create the bucket.
|
||||
|
||||
## Create a bucket using the influx CLI
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: Update a bucket
|
||||
seotitle: Update a bucket in InfluxDB
|
||||
description: Update a bucket's name or retention period in InfluxDB using the InfluxDB UI or the influx CLI.
|
||||
description: Update a bucket's name or retention policy in InfluxDB using the InfluxDB UI or the influx CLI.
|
||||
menu:
|
||||
influxdb_2_0:
|
||||
name: Update a bucket
|
||||
|
@ -32,14 +32,14 @@ If you change a bucket name, be sure to update the bucket in the above places as
|
|||
3. Review the information in the window that appears and click **I understand, let's rename my bucket**.
|
||||
4. Update the bucket's name and click **Change Bucket Name**.
|
||||
|
||||
## Update a bucket's retention period in the InfluxDB UI
|
||||
## Update a bucket's retention policy in the InfluxDB UI
|
||||
|
||||
1. In the navigation menu on the left, select **Data (Load Data)** > **Buckets**.
|
||||
|
||||
{{< nav-icon "data" >}}
|
||||
|
||||
2. Click **Settings** next to the bucket you want to update.
|
||||
3. In the window that appears, edit the bucket's retention period.
|
||||
3. In the window that appears, edit the bucket's retention policy.
|
||||
4. Click **Save Changes**.
|
||||
|
||||
## Update a bucket using the influx CLI
|
||||
|
@ -60,9 +60,9 @@ influx bucket update -i <bucket-id> -o <org-name> -n <new-bucket-name>
|
|||
influx bucket update -i 034ad714fdd6f000 -o my-org -n my-new-bucket
|
||||
```
|
||||
|
||||
##### Update a bucket's retention period
|
||||
##### Update a bucket's retention policy
|
||||
|
||||
Valid retention period duration units are nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), or weeks (`w`).
|
||||
Valid retention policy duration units are nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), or weeks (`w`).
|
||||
|
||||
```sh
|
||||
# Syntax
|
||||
|
|
|
@ -74,6 +74,7 @@ Once your task is ready, see [Create a task](/influxdb/v2.0/process-data/manage-
|
|||
## Things to consider
|
||||
- If there is a chance that data may arrive late, specify an `offset` in your
|
||||
task options long enough to account for late-data.
|
||||
- If running a task against a bucket with a finite retention period,
|
||||
schedule tasks to run prior to the end of the retention period to let
|
||||
downsampling tasks complete before data outside of the retention period is dropped.
|
||||
- If running a task against a bucket with a finite retention policy, do not schedule
|
||||
tasks to run too closely to the end of the retention policy.
|
||||
Always provide a "cushion" for downsampling tasks to complete before the data
|
||||
is dropped by the retention policy.
|
||||
|
|
|
@ -10,8 +10,7 @@ menu:
|
|||
weight: 203
|
||||
related:
|
||||
- /influxdb/v2.0/reference/cli/influx/task/run
|
||||
- /influxdb/v2.0/reference/cli/influx/task/run/retry
|
||||
- /influxdb/v2.0/reference/cli/influx/task/retry-failed
|
||||
- /influxdb/v2.0/reference/cli/influx/task/retry
|
||||
---
|
||||
|
||||
InfluxDB data processing tasks generally run in defined intervals or at a specific time,
|
||||
|
@ -33,8 +32,6 @@ Use the `influx task run retry` command to run a task.
|
|||
To run a task from the `influx` CLI, the task must have already run at least once.
|
||||
{{% /note %}}
|
||||
|
||||
{{< cli/influx-creds-note >}}
|
||||
|
||||
```sh
|
||||
# List all tasks to find the ID of the task to run
|
||||
influx task list
|
||||
|
@ -45,22 +42,3 @@ influx task run list --task-id=0000000000000000
|
|||
# Use the task ID and run ID to retry a run
|
||||
influx task run retry --task-id=0000000000000000 --run-id=0000000000000000
|
||||
```
|
||||
|
||||
### Retry failed task runs
|
||||
Use the [`influx task retry-failed` command](/influxdb/v2.0/reference/cli/influx/task/retry-failed/)
|
||||
to retry failed task runs.
|
||||
|
||||
```sh
|
||||
# Retry failed tasks for a specific task
|
||||
influx task retry-failed \
|
||||
--id 0000000000000000
|
||||
|
||||
# Print information about runs that will be retried
|
||||
influx task retry-failed \
|
||||
--dry-run
|
||||
|
||||
# Retry failed task runs that occurred in a specific time range
|
||||
influx task retry-failed \
|
||||
--after 2021-01-01T00:00:00Z \
|
||||
--before 2021-01-01T23:59:59Z
|
||||
```
|
||||
|
|
|
@ -8,9 +8,7 @@ menu:
|
|||
parent: Manage tasks
|
||||
weight: 203
|
||||
related:
|
||||
- /influxdb/v2.0/reference/cli/influx/task/list
|
||||
- /influxdb/v2.0/reference/cli/influx/task/run/list
|
||||
- /influxdb/v2.0/reference/cli/influx/task/retry-failed
|
||||
- /influxdb/v2.0/reference/cli/influx/task/run/find
|
||||
---
|
||||
|
||||
When an InfluxDB task runs, a "run" record is created in the task's history.
|
||||
|
@ -46,22 +44,3 @@ influx task run list --task-id=0000000000000000
|
|||
{{% note %}}
|
||||
Detailed run logs are not currently available in the `influx` CLI.
|
||||
{{% /note %}}
|
||||
|
||||
## Retry failed task runs
|
||||
Use the [`influx task retry-failed` command](/influxdb/v2.0/reference/cli/influx/task/retry-failed/)
|
||||
to retry failed task runs.
|
||||
|
||||
```sh
|
||||
# Retry failed tasks for a specific task
|
||||
influx task retry-failed \
|
||||
--id 0000000000000000
|
||||
|
||||
# Print information about runs that will be retried
|
||||
influx task retry-failed \
|
||||
--dry-run
|
||||
|
||||
# Retry failed task runs that occurred in a specific time range
|
||||
influx task retry-failed \
|
||||
--after 2021-01-01T00:00:00Z \
|
||||
--before 2021-01-01T23:59:59Z
|
||||
```
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Create custom Flux functions
|
||||
description: Create your own custom Flux functions to transform and operate on data.
|
||||
description: Create your own custom Flux functions to transform and manipulate data.
|
||||
list_title: Custom functions
|
||||
influxdb/v2.0/tags: [functions, custom, flux]
|
||||
menu:
|
||||
|
|
|
@ -12,56 +12,29 @@ influxdb/v2.0/tags: [query]
|
|||
|
||||
Optimize your Flux queries to reduce their memory and compute (CPU) requirements.
|
||||
|
||||
- [Start queries with pushdowns](#start-queries-with-pushdowns)
|
||||
- [Avoid processing filters inline](#avoid-processing-filters-inline)
|
||||
- [Start queries with pushdown functions](#start-queries-with-pushdown-functions)
|
||||
- [Avoid short window durations](#avoid-short-window-durations)
|
||||
- [Use "heavy" functions sparingly](#use-heavy-functions-sparingly)
|
||||
- [Use set() instead of map() when possible](#use-set-instead-of-map-when-possible)
|
||||
- [Balance time range and data precision](#balance-time-range-and-data-precision)
|
||||
- [Measure query performance with Flux profilers](#measure-query-performance-with-flux-profilers)
|
||||
|
||||
## Start queries with pushdowns
|
||||
**Pushdowns** are functions or function combinations that push data operations to the underlying data source rather than operating on data in memory. Start queries with pushdowns to improve query performance. Once a non-pushdown function runs, Flux pulls data into memory and runs all subsequent operations there.
|
||||
#### Pushdown functions and function combinations
|
||||
Most pushdowns are supported when querying an InfluxDB 2.0 or InfluxDB Cloud data source. As shown in the following table, a handful of pushdowns are not supported in InfluxDB 2.0.
|
||||
## Start queries with pushdown functions
|
||||
Some Flux functions can push their data manipulation down to the underlying
|
||||
data source rather than storing and manipulating data in memory.
|
||||
These are known as "pushdown" functions and using them correctly can greatly
|
||||
reduce the amount of memory necessary to run a query.
|
||||
|
||||
| Functions | InfluxDB 2.0 | InfluxDB Cloud |
|
||||
|:--------- |:------------: |:--------------: |
|
||||
| **count()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **drop()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **duplicate()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **filter()** {{% req " \*" %}} | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **fill()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **first()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **keep()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **last()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **max()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **mean()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **min()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **range()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **rename()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **sum()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **window()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| _Function combinations_ | | |
|
||||
| **group()** \|> **count()** | | {{< icon "check" >}} |
|
||||
| **group()** \|> **first()** | | {{< icon "check" >}} |
|
||||
| **group()** \|> **last()** | | {{< icon "check" >}} |
|
||||
| **group()** \|> **max()** | | {{< icon "check" >}} |
|
||||
| **group()** \|> **min()** | | {{< icon "check" >}} |
|
||||
| **group()** \|> **sum()** | | {{< icon "check" >}} |
|
||||
| **window()** \|> **count()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **window()** \|> **first()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **window()** \|> **last()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **window()** \|> **max()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **window()** \|> **min()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
| **window()** \|> **sum()** | {{< icon "check" >}} | {{< icon "check" >}} |
|
||||
#### Pushdown functions
|
||||
- [range()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/range/)
|
||||
- [filter()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/filter/)
|
||||
<!--
|
||||
[group()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/group/)
|
||||
[count()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/count/)
|
||||
[sum()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/sum/)
|
||||
[first()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/selectors/first/)
|
||||
[last()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/selectors/last/)
|
||||
-->
|
||||
|
||||
{{% caption %}}
|
||||
{{< req "\*" >}} **filter()** only pushes down when all parameter values are static.
|
||||
See [Avoid processing filters inline](#avoid-processing-filters-inline).
|
||||
{{% /caption %}}
|
||||
|
||||
Use pushdown functions and function combinations at the beginning of your query.
|
||||
Use pushdown functions at the beginning of your query.
|
||||
Once a non-pushdown function runs, Flux pulls data into memory and runs all
|
||||
subsequent operations there.
|
||||
|
||||
|
@ -69,41 +42,12 @@ subsequent operations there.
|
|||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: -1h) //
|
||||
|> filter(fn: (r) => r.sensor == "abc123") //
|
||||
|> group(columns: ["_field", "host"]) // Pushed to the data source
|
||||
|> aggregateWindow(every: 5m, fn: max) //
|
||||
|> filter(fn: (r) => r.sensor == "abc123") // Pushed to the data source
|
||||
|
||||
|> group(columns: ["_field", "host"]) //
|
||||
|> aggregateWindow(every: 5m, fn: max) // Run in memory
|
||||
|> filter(fn: (r) => r._value >= 90.0) //
|
||||
|
||||
|> top(n: 10) // Run in memory
|
||||
```
|
||||
|
||||
### Avoid processing filters inline
|
||||
Avoid using mathematic operations or string manipulation inline to define data filters.
|
||||
Processing filter values inline prevents `filter()` from pushing its operation down
|
||||
to the underlying data source, so data returned by the
|
||||
previous function loads into memory.
|
||||
This often results in a significant performance hit.
|
||||
|
||||
For example, the following query uses [dashboard variables](/influxdb/v2.0/visualize-data/variables/)
|
||||
and string concatenation to define a region to filter by.
|
||||
Because `filter()` uses string concatenation inline, it can't push its operation
|
||||
to the underlying data source and loads all data returned from `range()` into memory.
|
||||
|
||||
```js
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: -1h)
|
||||
|> filter(fn: (r) => r.region == v.provider + v.region)
|
||||
```
|
||||
|
||||
To dynamically set filters and maintain the pushdown ability of the `filter()` function,
|
||||
use variables to define filter values outside of `filter()`:
|
||||
|
||||
```js
|
||||
region = v.provider + v.region
|
||||
|
||||
from(bucket: "example-bucket")
|
||||
|> range(start: -1h)
|
||||
|> filter(fn: (r) => r.region == region)
|
||||
|> top(n: 10) //
|
||||
```
|
||||
|
||||
## Avoid short window durations
|
||||
|
@ -118,6 +62,7 @@ Consider their necessity in your data processing before using them:
|
|||
|
||||
- [map()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/map/)
|
||||
- [reduce()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/)
|
||||
- [window()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/window/)
|
||||
- [join()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/join/)
|
||||
- [union()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/union/)
|
||||
- [pivot()](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/pivot/)
|
||||
|
@ -126,60 +71,10 @@ Consider their necessity in your data processing before using them:
|
|||
We're continually optimizing Flux and this list may not represent its current state.
|
||||
{{% /note %}}
|
||||
|
||||
## Use set() instead of map() when possible
|
||||
[`set()`](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/set/),
|
||||
[`experimental.set()`](/influxdb/v2.0/reference/flux/stdlib/experimental/set/),
|
||||
and [`map`](/influxdb/v2.0/reference/flux/stdlib/built-in/transformations/map/)
|
||||
can each set columns value in data, however **set** functions have performance
|
||||
advantages over `map()`.
|
||||
|
||||
Use the following guidelines to determine which to use:
|
||||
|
||||
- If setting a column value to a predefined, static value, use `set()` or `experimental.set()`.
|
||||
- If dynamically setting a column value using **existing row data**, use `map()`.
|
||||
|
||||
#### Set a column value to a static value
|
||||
The following queries are functionally the same, but using `set()` is more performant than using `map()`.
|
||||
|
||||
```js
|
||||
data
|
||||
|> map(fn: (r) => ({ r with foo: "bar" }))
|
||||
|
||||
// Recommended
|
||||
data
|
||||
|> set(key: "foo", as: "bar" }))
|
||||
```
|
||||
|
||||
#### Dynamically set a column value using existing row data
|
||||
```js
|
||||
data
|
||||
|> map(fn: (r) => ({ r with foo: r.bar }))
|
||||
```
|
||||
|
||||
## Balance time range and data precision
|
||||
To ensure queries are performant, balance the time range and the precision of your data.
|
||||
For example, if you query data stored every second and request six months worth of data,
|
||||
results would include ≈15.5 million points per series. Depending on the number of series returned after `filter()`([cardinality](/influxdb/v2.0/reference/glossary/#series-cardinality)), this can quickly become many billions of points.
|
||||
Flux must store these points in memory to generate a response. Use [pushdowns](#pushdown-functions-and-function-combinations) to optimize how many points are stored in memory.
|
||||
Flux must store these points in memory to generate a response. Use [pushdown functions](#pushdown-functions) to optimize how many points are stored in memory.
|
||||
|
||||
To query data over large periods of time, create a task to [downsample data](/influxdb/v2.0/process-data/common-tasks/downsample-data/), and then query the downsampled data instead.
|
||||
|
||||
## Measure query performance with Flux profilers
|
||||
Use the [Flux Profiler package](/influxdb/v2.0/reference/flux/stdlib/profiler/)
|
||||
to measure query performance and append performance metrics to your query output.
|
||||
The following Flux profilers are available:
|
||||
|
||||
- **query**: provides statistics about the execution of an entire Flux script.
|
||||
- **operator**: provides statistics about each operation in a query.
|
||||
|
||||
Import the `profiler` package and enable profilers with the `profile.enabledProfilers` option.
|
||||
|
||||
```js
|
||||
import "profiler"
|
||||
|
||||
option profiler.enabledProfilers = ["query", "operator"]
|
||||
|
||||
// Query to profile
|
||||
```
|
||||
|
||||
For more information about Flux profilers, see the [Flux Profiler package](/influxdb/v2.0/reference/flux/stdlib/profiler/).
|
||||
|
|
|
@ -66,8 +66,8 @@ Authorization: Token mYSuP3rs3cREtT0k3N
|
|||
The compatibility API supports InfluxQL, with the following caveats:
|
||||
|
||||
- The `INTO` clause (e.g. `SELECT ... INTO ...`) is not supported.
|
||||
- With the exception of [`DELETE`](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-series-with-delete) and
|
||||
[`DROP MEASUREMENT`](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#delete-measurements-with-drop-measurement) queries, which are still allowed,
|
||||
- With the exception of [`DELETE`](/influxdb/v1.8/query_language/manage-database/#delete-series-with-delete) and
|
||||
[`DROP MEASUREMENT`](/influxdb/v1.8/query_language/manage-database/#delete-measurements-with-drop-measurement) queries, which are still allowed,
|
||||
InfluxQL database management commands are not supported.
|
||||
|
||||
## Compatibility endpoints
|
||||
|
|
|
@ -65,6 +65,7 @@ For more information about managing CLI configurations, see the
|
|||
| [task](/influxdb/v2.0/reference/cli/influx/task) | Task management commands |
|
||||
| [telegrafs](/influxdb/v2.0/reference/cli/influx/telegrafs) | Telegraf configuration management commands |
|
||||
| [template](/influxdb/v2.0/reference/cli/influx/template) | Summarize and validate an InfluxDB template |
|
||||
| [transpile](/influxdb/v2.0/reference/cli/influx/transpile) | Manually transpile an InfluxQL query to Flux |
|
||||
| [user](/influxdb/v2.0/reference/cli/influx/user) | User management commands |
|
||||
| [v1](/influxdb/v2.0/reference/cli/influx/v1) | Work with the v1 compatibility API |
|
||||
| [version](/influxdb/v2.0/reference/cli/influx/version) | Print the influx CLI version |
|
||||
|
|
|
@ -9,7 +9,6 @@ weight: 101
|
|||
influxdb/v2.0/tags: [backup]
|
||||
related:
|
||||
- /influxdb/v2.0/backup-restore/backup/
|
||||
- /influxdb/v2.0/reference/cli/influx/restore/
|
||||
- /influxdb/v2.0/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
|
||||
- /influxdb/v2.0/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
|
||||
---
|
||||
|
|
|
@ -10,7 +10,6 @@ aliases:
|
|||
- /influxdb/v2.0/reference/cli/influx/bucket/create/
|
||||
related:
|
||||
- /influxdb/v2.0/organizations/buckets/create-bucket/
|
||||
- /influxdb/v2.0/reference/internals/shards/
|
||||
- /influxdb/v2.0/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
|
||||
- /influxdb/v2.0/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
|
||||
---
|
||||
|
@ -23,22 +22,21 @@ influx bucket create [flags]
|
|||
```
|
||||
|
||||
## Flags
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:---- |:--- |:----------- |:----------: |:------------------ |
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string |`INFLUX_CONFIGS_PATH` |
|
||||
| `-d` | `--description` | Bucket description | string | |
|
||||
| `-h` | `--help` | Help for the `create` command | | |
|
||||
| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
|
||||
| `-n` | `--name` | Bucket name | string | `INFLUX_BUCKET_NAME` |
|
||||
| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
|
||||
| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
|
||||
| `-r` | `--retention` | Duration bucket retains data (0 is infinite, default is 0) | duration | |
|
||||
| | `--shard-group-duration` | Bucket shard group duration (OSS only) | string | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:---- |:--- |:----------- |:----------: |:------------------ |
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string |`INFLUX_CONFIGS_PATH` |
|
||||
| `-d` | `--description` | Bucket description | string | |
|
||||
| `-h` | `--help` | Help for the `create` command | | |
|
||||
| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
|
||||
| `-n` | `--name` | Bucket name | string | `INFLUX_BUCKET_NAME` |
|
||||
| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
|
||||
| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
|
||||
| `-r` | `--retention` | Duration bucket will retain data (0 is infinite, default is 0) | duration | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
|
||||
{{% note %}}
|
||||
Valid `--retention` units are nanoseconds (`ns`), microseconds (`us` or `µs`),
|
||||
|
@ -52,7 +50,6 @@ milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), and
|
|||
- [Create a bucket with infinite data retention](#create-a-bucket-with-infinite-data-retention)
|
||||
- [Create a bucket that retains data for 30 days](#create-a-bucket-that-retains-data-for-30-days)
|
||||
- [Create a bucket with a description](#create-a-bucket-with-a-description)
|
||||
- [Create a bucket with a custom shard group duration](#create-a-bucket-with-a-custom-shard-group-duration)
|
||||
|
||||
##### Create a bucket with infinite data retention
|
||||
```sh
|
||||
|
@ -72,14 +69,3 @@ influx bucket create \
|
|||
--name example-bucket \
|
||||
--description "Example bucket description"
|
||||
```
|
||||
|
||||
##### Create a bucket with a custom shard group duration
|
||||
Custom shard group durations are only supported in **InfluxDB OSS**.
|
||||
The shard group duration must be shorter than the bucket's retention period. For more information, see [InfluxDB shards and shard groups](/influxdb/v2.0/reference/internals/shards/).
|
||||
|
||||
```sh
|
||||
influx bucket create \
|
||||
--name example-bucket \
|
||||
--retention 30d \
|
||||
--shard-group-duration 2d
|
||||
```
|
||||
|
|
|
@ -8,11 +8,6 @@ menu:
|
|||
weight: 201
|
||||
aliases:
|
||||
- /influxdb/v2.0/reference/cli/influx/bucket/update/
|
||||
related:
|
||||
- /influxdb/v2.0/organizations/buckets/update-bucket/
|
||||
- /influxdb/v2.0/reference/internals/shards/
|
||||
- /influxdb/v2.0/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
|
||||
- /influxdb/v2.0/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
|
||||
---
|
||||
|
||||
The `influx bucket update` command updates information associated with buckets in InfluxDB.
|
||||
|
@ -23,21 +18,20 @@ influx bucket update [flags]
|
|||
```
|
||||
|
||||
## Flags
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:---- |:--- |:----------- |:----------: |:------------------ |
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string |`INFLUX_CONFIGS_PATH` |
|
||||
| `-d` | `--description` | Bucket description | string | |
|
||||
| `-h` | `--help` | Help for the `update` command | | |
|
||||
| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| `-i` | `--id` | ({{< req >}}) Bucket ID | string | |
|
||||
| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
|
||||
| `-n` | `--name` | New bucket name | string | `INFLUX_BUCKET_NAME` |
|
||||
| `-r` | `--retention` | New duration bucket will retain data | duration | |
|
||||
| | `--shard-group-duration` | Custom shard group duration for the bucket (OSS only) | string | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:---- |:--- |:----------- |:----------: |:------------------ |
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string |`INFLUX_CONFIGS_PATH` |
|
||||
| `-d` | `--description` | Bucket description | string | |
|
||||
| `-h` | `--help` | Help for the `update` command | | |
|
||||
| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| `-i` | `--id` | ({{< req >}}) Bucket ID | string | |
|
||||
| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
|
||||
| `-n` | `--name` | New bucket name | string | `INFLUX_BUCKET_NAME` |
|
||||
| `-r` | `--retention` | New duration bucket will retain data | duration | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
|
||||
{{% note %}}
|
||||
Valid `--retention` units are nanoseconds (`ns`), microseconds (`us` or `µs`),
|
||||
|
@ -51,23 +45,13 @@ milliseconds (`ms`), seconds (`s`), minutes (`m`), hours (`h`), days (`d`), and
|
|||
##### Update the name of a bucket
|
||||
```sh
|
||||
influx bucket update \
|
||||
--id 06c86c40a9f36000 \
|
||||
--id 06c86c40a9f36000
|
||||
--name new-bucket-name
|
||||
```
|
||||
|
||||
##### Update the retention period of a bucket
|
||||
```sh
|
||||
influx bucket update \
|
||||
--id 06c86c40a9f36000 \
|
||||
--id 06c86c40a9f36000
|
||||
--retention 90d
|
||||
```
|
||||
|
||||
##### Update the shard group duration of a bucket
|
||||
Custom shard group durations are only supported in **InfluxDB OSS**.
|
||||
The shard group duration must be shorter than the buckets retention period.
|
||||
|
||||
```sh
|
||||
influx bucket update \
|
||||
--id 06c86c40a9f36000 \
|
||||
--shard-group-duration 2d
|
||||
```
|
||||
```
|
|
@ -20,9 +20,9 @@ _For detailed examples of exporting InfluxDB templates, see
|
|||
[Create an InfluxDB template](/influxdb/v2.0/influxdb-templates/create/)._
|
||||
|
||||
{{% note %}}
|
||||
To export resources as a template, you must use the **Operator token** created for
|
||||
the initial InfluxDB user or an **All-Access token**.
|
||||
For information about creating an All-Access token, see [Create an authentication token](/influxdb/v2.0/security/tokens/create-token/).
|
||||
To export resources as a template, you must use the **Admin token** created for
|
||||
the initial InfluxDB user or an **All Access token**.
|
||||
For information about creating an All Access token, see [Create an authentication token](/influxdb/v2.0/security/tokens/create-token/).
|
||||
{{% /note %}}
|
||||
|
||||
## Usage
|
||||
|
|
|
@ -17,9 +17,9 @@ The `influx export stack` command exports all resources associated with a stack
|
|||
All `metadata.name` fields remain the same.
|
||||
|
||||
{{% note %}}
|
||||
To export resources as a template, you must use the **Operator token** created for
|
||||
the initial InfluxDB user or an **All-Access token**.
|
||||
For information about creating an All-Access token, see [Create an authentication token](/influxdb/v2.0/security/tokens/create-token/).
|
||||
To export resources as a template, you must use the **Admin token** created for
|
||||
the initial InfluxDB user or an **All Access token**.
|
||||
For information about creating an All Access token, see [Create an authentication token](/influxdb/v2.0/security/tokens/create-token/).
|
||||
{{% /note %}}
|
||||
|
||||
## Usage
|
||||
|
|
|
@ -45,7 +45,6 @@ drop columns such as `_start` and `_stop` to optimize the download file size.
|
|||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
|
||||
| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
|
||||
| `-p` | `--profilers` | Flux query profilers to enable (comma-separated) | string | |
|
||||
| `-r` | `--raw` | Output raw query results (annotated CSV) | | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
|
@ -57,7 +56,6 @@ drop columns such as `_start` and `_stop` to optimize the download file size.
|
|||
- [Query InfluxDB with a Flux string](#query-influxdb-with-a-flux-string)
|
||||
- [Query InfluxDB using a Flux file](#query-influxdb-with-a-flux-file)
|
||||
- [Query InfluxDB and return annotated CSV](#query-influxdb-and-return-annotated-csv)
|
||||
- [Query InfluxDB and append query profile data to results](#query-influxdb-and-append-query-profile-data-to-results)
|
||||
|
||||
##### Query InfluxDB with a Flux string
|
||||
```sh
|
||||
|
@ -72,13 +70,4 @@ influx query --file /path/to/example-query.flux
|
|||
##### Query InfluxDB and return annotated CSV
|
||||
```sh
|
||||
influx query 'from(bucket:"example-bucket") |> range(start:-1m)' --raw
|
||||
```
|
||||
|
||||
##### Query InfluxDB and append query profile data to results
|
||||
_For more information about profilers, see [Flux profilers](/influxdb/v2.0/reference/flux/stdlib/profiler/#available-profilers)._
|
||||
|
||||
```sh
|
||||
influx query \
|
||||
--profilers operator,query \
|
||||
'from(bucket:"example-bucket") |> range(start:-1m)'
|
||||
```
|
|
@ -10,7 +10,6 @@ alias:
|
|||
- /influxdb/v2.0/reference/cli/influxd/restore/
|
||||
related:
|
||||
- /influxdb/v2.0/backup-restore/restore/
|
||||
- /influxdb/v2.0/reference/cli/influx/backup/
|
||||
- /influxdb/v2.0/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
|
||||
- /influxdb/v2.0/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
|
||||
---
|
||||
|
@ -26,14 +25,6 @@ If the restore process fails, InfluxDB preserves the data in the temporary locat
|
|||
_For information about recovering from a failed restore process, see
|
||||
[Restore data](/influxdb/v2.0/backup-restore/restore/#recover-from-a-failed-restore)._
|
||||
|
||||
{{% note %}}
|
||||
#### Cannot restore to existing buckets
|
||||
The `influx restore` command cannot restore data to existing buckets.
|
||||
Use the `--new-bucket` flag to create a bucket with a new name and restore data into it.
|
||||
To restore data and retain bucket names, [delete existing buckets](/influxdb/v2.0/organizations/buckets/delete-bucket/)
|
||||
and then begin the restore process.
|
||||
{{% /note %}}
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
|
@ -65,30 +56,25 @@ influx restore [flags]
|
|||
|
||||
{{< cli/influx-creds-note >}}
|
||||
|
||||
- [Restore backup data](#restore-backup-data)
|
||||
- [Restore backup data for a specific bucket into a new bucket](#restore-backup-data-for-a-specific-bucket-into-a-new-bucket)
|
||||
- [Restore and replace all data](#restore-and-replace-all-data)
|
||||
|
||||
##### Restore backup data
|
||||
```sh
|
||||
influx restore \
|
||||
--input /path/to/backup/dir/
|
||||
```
|
||||
|
||||
##### Restore backup data for a specific bucket into a new bucket
|
||||
```sh
|
||||
influx restore \
|
||||
--bucket example-bucket \
|
||||
--new-bucket new-example-bucket \
|
||||
--input /path/to/backup/dir/
|
||||
```
|
||||
- [Restore backup data to an existing bucket](#restore-backup-data-to-an-existing-bucket)
|
||||
- [Create a bucket and restore data to it](#create-a-bucket-and-restore-data-to-it)
|
||||
|
||||
##### Restore and replace all data
|
||||
{{% note %}}
|
||||
`influx restore --full` restores all time series data _and_ InfluxDB key-value
|
||||
data such as tokens, dashboards, users, etc.
|
||||
{{% /note %}}
|
||||
|
||||
```sh
|
||||
influx restore --full --input /path/to/backup/dir/
|
||||
```
|
||||
|
||||
##### Restore backup data to an existing bucket
|
||||
```sh
|
||||
influx restore \
|
||||
--bucket example-bucket \
|
||||
--input /path/to/backup/dir/
|
||||
```
|
||||
|
||||
##### Create a bucket and restore data to it
|
||||
```sh
|
||||
influx restore \
|
||||
--new-bucket new-example-bucket \
|
||||
--input /path/to/backup/dir/
|
||||
```
|
||||
|
|
|
@ -19,7 +19,7 @@ The `influx setup` command walks through the initial InfluxDB OSS setup process,
|
|||
creating a default user, organization, and bucket.
|
||||
|
||||
{{% note %}}
|
||||
The **Operator token** created in the InfluxDB setup process has full read and write
|
||||
The **Admin token** created in the InfluxDB setup process has full read and write
|
||||
access to all organizations in the database.
|
||||
{{% /note %}}
|
||||
|
||||
|
|
|
@ -23,15 +23,14 @@ influx task [command]
|
|||
```
|
||||
|
||||
### Subcommands
|
||||
| Subcommand | Description |
|
||||
|:---------- |:----------- |
|
||||
| [create](/influxdb/v2.0/reference/cli/influx/task/create) | Create task |
|
||||
| [delete](/influxdb/v2.0/reference/cli/influx/task/delete) | Delete task |
|
||||
| [list](/influxdb/v2.0/reference/cli/influx/task/list) | List tasks |
|
||||
| [log](/influxdb/v2.0/reference/cli/influx/task/log) | Log related commands |
|
||||
| [retry-failed](/influxdb/v2.0/reference/cli/influx/task/retry-failed) | Retry failed task runs |
|
||||
| [run](/influxdb/v2.0/reference/cli/influx/task/run) | Run related commands |
|
||||
| [update](/influxdb/v2.0/reference/cli/influx/task/update) | Update task |
|
||||
| Subcommand | Description |
|
||||
|:---------- |:----------- |
|
||||
| [create](/influxdb/v2.0/reference/cli/influx/task/create) | Create task |
|
||||
| [delete](/influxdb/v2.0/reference/cli/influx/task/delete) | Delete task |
|
||||
| [list](/influxdb/v2.0/reference/cli/influx/task/list) | List tasks |
|
||||
| [log](/influxdb/v2.0/reference/cli/influx/task/log) | Log related commands |
|
||||
| [run](/influxdb/v2.0/reference/cli/influx/task/run) | Run related commands |
|
||||
| [update](/influxdb/v2.0/reference/cli/influx/task/update) | Update task |
|
||||
|
||||
### Flags
|
||||
| Flag | | Description |
|
||||
|
|
|
@ -1,91 +0,0 @@
|
|||
---
|
||||
title: influx task retry-failed
|
||||
description: The `influx task retry-failed` command retries failed InfluxDB task runs.
|
||||
menu:
|
||||
influxdb_2_0_ref:
|
||||
name: influx task retry-failed
|
||||
parent: influx task
|
||||
weight: 201
|
||||
---
|
||||
|
||||
The `influx task retry-failed` command retries failed InfluxDB task runs.
|
||||
## Usage
|
||||
```
|
||||
influx task retry-failed [flags]
|
||||
```
|
||||
|
||||
## Flags
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:---- |:--- |:----------- |:----------: |:------------------ |
|
||||
| | `--after` | Retry task runs that occurred after this time (RFC3339 timestamp) | string | |
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string |`INFLUX_CONFIGS_PATH` |
|
||||
| | `--before` | Retry task runs that occurred before this time (RFC3339 timestamp) | string | |
|
||||
| | `--dry-run` | Print information about task runs that would be retried | | |
|
||||
| `-h` | `--help` | Help for the `list` command | | |
|
||||
| | `--hide-headers` | Hide table headers (default `false`) | | `INFLUX_HIDE_HEADERS` |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| `-i` | `--id` | Task ID | string | |
|
||||
| | `--json` | Output data as JSON (default `false`) | | `INFLUX_OUTPUT_JSON` |
|
||||
| `-o` | `--org` | Task organization name | string | `INFLUX_ORG` |
|
||||
| | `--org-id` | Task organization ID | string | `INFLUX_ORG_ID` |
|
||||
| | `--run-limit` | Maximum number of failed runs to retry per task (`1-500`, default `100`) | integer | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| | `--task-limit` | Maximum number of tasks to retry failed runs for (`1-500`, default `100`) | integer | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
|
||||
## Examples
|
||||
|
||||
{{< cli/influx-creds-note >}}
|
||||
|
||||
- [Retry failed task runs for a specific task ID](#retry-failed-task-runs-for-a-specific-task-id)
|
||||
- [Retry failed task runs that occurred before a specific time](#retry-failed-task-runs-that-occurred-before-a-specific-time)
|
||||
- [Retry failed task runs that occurred after a specific time](#retry-failed-task-runs-that-occurred-after-a-specific-time)
|
||||
- [Retry failed task runs that occurred in a specific time range](#retry-failed-task-runs-that-occurred-in-a-specific-time-range)
|
||||
- [Retry failed runs for a limited number of tasks](#retry-failed-runs-for-a-limited-number-of-tasks)
|
||||
- [Retry a limited number of failed runs for a task](#retry-a-limited-number-of-failed-runs-for-a-task)
|
||||
- [Print information about runs that will be retried](#print-information-about-runs-that-will-be-retried)
|
||||
|
||||
##### Retry failed task runs for a specific task ID
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--id 0Xx0oox00XXoxxoo1
|
||||
```
|
||||
|
||||
##### Retry failed task runs that occurred before a specific time
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--before 2021-01-01T00:00:00Z
|
||||
```
|
||||
|
||||
##### Retry failed task runs that occurred after a specific time
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--after 2021-01-01T00:00:00Z
|
||||
```
|
||||
|
||||
##### Retry failed task runs that occurred in a specific time range
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--after 2021-01-01T00:00:00Z \
|
||||
--before 2021-01-01T23:59:59Z
|
||||
```
|
||||
|
||||
##### Retry failed runs for a limited number of tasks
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--task-limit 5
|
||||
```
|
||||
|
||||
##### Retry a limited number of failed runs for a task
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--id 0Xx0oox00XXoxxoo1 \
|
||||
--run-limit 5
|
||||
```
|
||||
|
||||
##### Print information about runs that will be retried
|
||||
```sh
|
||||
influx task retry-failed \
|
||||
--dry-run
|
||||
```
|
|
@ -2,24 +2,17 @@
|
|||
title: influx transpile
|
||||
description: >
|
||||
The `influx transpile` command transpiles an InfluxQL query to Flux source code.
|
||||
menu:
|
||||
influxdb_2_0_ref:
|
||||
name: influx transpile
|
||||
parent: influx
|
||||
weight: 101
|
||||
influxdb/v2.0/tags: [influxql, flux]
|
||||
related:
|
||||
- /influxdb/v2.0/reference/cli/influx/#provide-required-authentication-credentials, influx CLI—Provide required authentication credentials
|
||||
- /influxdb/v2.0/reference/cli/influx/#flag-patterns-and-conventions, influx CLI—Flag patterns and conventions
|
||||
---
|
||||
|
||||
{{% warn %}}
|
||||
### Removed in InfluxDB OSS 2.0.5
|
||||
The `influx transpile` command was removed in **InfluxDB 2.0.5**.
|
||||
[Use InfluxQL to query InfluxDB 2.0](/influxdb/v2.0/query-data/influxql/).
|
||||
For information about manually converting InfluxQL queries to Flux, see:
|
||||
|
||||
- [Get started with Flux](/influxdb/v2.0/query-data/get-started/)
|
||||
- [Query data with Flux](/influxdb/v2.0/query-data/flux/)
|
||||
- [Migrate continuous queries to Flux tasks](/influxdb/%762.0/upgrade/v1-to-v2/migrate-cqs/)
|
||||
<!-- Used the hex code in the link above to prevent v2.0 -> cloud replacement -->
|
||||
{{% /warn %}}
|
||||
|
||||
The `influx transpile` command transpiles an InfluxQL query to Flux source code.
|
||||
The transpiled query assumes the bucket name is `<database>/<retention policy>`
|
||||
and includes absolute time ranges using the provided `--now` time.
|
||||
|
|
|
@ -63,31 +63,30 @@ In **extended annotated CSV**, measurements, fields, and values and their types
|
|||
| [dryrun](/influxdb/v2.0/reference/cli/influx/write/dryrun) | Write to stdout instead of InfluxDB |
|
||||
|
||||
## Flags
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:-----|:--------------------|:---------------------------------------------------------------------------------------------|:-----------:|:----------------------|
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| `-b` | `--bucket` | Bucket name (mutually exclusive with `--bucket-id`) | string | `INFLUX_BUCKET_NAME` |
|
||||
| | `--bucket-id` | Bucket ID (mutually exclusive with `--bucket`) | string | `INFLUX_BUCKET_ID` |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
|
||||
| | `--compression` | Input compression (`none` or `gzip`, default is `none` unless input file ends with `.gz`.) | string | |
|
||||
| | `--debug` | Output errors to stderr | | |
|
||||
| | `--encoding` | Character encoding of input (default `UTF-8`) | string | |
|
||||
| | `--error-file` | Path to a file used for recording rejected row errors | string | |
|
||||
| `-f` | `--file` | File to import | stringArray | |
|
||||
| | `--format` | Input format (`lp` or `csv`, default `lp`) | string | |
|
||||
| | `--header` | Prepend header line to CSV input data | string | |
|
||||
| `-h` | `--help` | Help for the `write` command | | |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| | `--max-line-length` | Maximum number of bytes that can be read for a single line (default `16000000`) | integer | |
|
||||
| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
|
||||
| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
|
||||
| `-p` | `--precision` | Precision of the timestamps (default `ns`) | string | `INFLUX_PRECISION` |
|
||||
| | `--rate-limit` | Throttle write rate (examples: `5 MB / 5 min` or `1MB/s`). | string | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| | `--skipHeader` | Skip first *n* rows of input data | integer | |
|
||||
| | `--skipRowOnError` | Output CSV errors to stderr, but continue processing | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
| `-u` | `--url` | URL to import data from | stringArray | |
|
||||
| Flag | | Description | Input type | {{< cli/mapped >}} |
|
||||
|:-----|:--------------------|:--------------------------------------------------------------------------------|:----------: |:----------------------|
|
||||
| `-c` | `--active-config` | CLI configuration to use for command | string | |
|
||||
| `-b` | `--bucket` | Bucket name (mutually exclusive with `--bucket-id`) | string | `INFLUX_BUCKET_NAME` |
|
||||
| | `--bucket-id` | Bucket ID (mutually exclusive with `--bucket`) | string | `INFLUX_BUCKET_ID` |
|
||||
| | `--configs-path` | Path to `influx` CLI configurations (default `~/.influxdbv2/configs`) | string | `INFLUX_CONFIGS_PATH` |
|
||||
| | `--debug` | Output errors to stderr | | |
|
||||
| | `--encoding` | Character encoding of input (default `UTF-8`) | string | |
|
||||
| | `--error-file` | Path to a file used for recording rejected row errors | string | |
|
||||
| `-f` | `--file` | File to import | stringArray | |
|
||||
| | `--format` | Input format (`lp` or `csv`, default `lp`) | string | |
|
||||
| | `--header` | Prepend header line to CSV input data | string | |
|
||||
| `-h` | `--help` | Help for the `write` command | | |
|
||||
| | `--host` | HTTP address of InfluxDB (default `http://localhost:8086`) | string | `INFLUX_HOST` |
|
||||
| | `--max-line-length` | Maximum number of bytes that can be read for a single line (default `16000000`) | integer | |
|
||||
| `-o` | `--org` | Organization name (mutually exclusive with `--org-id`) | string | `INFLUX_ORG` |
|
||||
| | `--org-id` | Organization ID (mutually exclusive with `--org`) | string | `INFLUX_ORG_ID` |
|
||||
| `-p` | `--precision` | Precision of the timestamps (default `ns`) | string | `INFLUX_PRECISION` |
|
||||
| | `--rate-limit` | Throttle write rate (examples: `5 MB / 5 min` or `1MB/s`). | string | |
|
||||
| | `--skip-verify` | Skip TLS certificate verification | | |
|
||||
| | `--skipHeader` | Skip first *n* rows of input data | integer | |
|
||||
| | `--skipRowOnError` | Output CSV errors to stderr, but continue processing | | |
|
||||
| `-t` | `--token` | Authentication token | string | `INFLUX_TOKEN` |
|
||||
| `-u` | `--url` | URL to import data from | stringArray | |
|
||||
|
||||
## Examples
|
||||
|
||||
|
@ -101,7 +100,6 @@ In **extended annotated CSV**, measurements, fields, and values and their types
|
|||
- [from a URL](#write-line-protocol-from-a-url)
|
||||
- [from multiple URLs](#write-line-protocol-from-multiple-urls)
|
||||
- [from multiple sources](#write-line-protocol-from-multiple-sources)
|
||||
- [from a compressed file](#write-line-protocol-from-a-compressed-file)
|
||||
|
||||
###### Write CSV data
|
||||
|
||||
|
@ -113,7 +111,7 @@ In **extended annotated CSV**, measurements, fields, and values and their types
|
|||
- [from multiple URLs](#write-annotated-csv-data-from-multiple-urls)
|
||||
- [from multiple sources](#write-annotated-csv-data-from-multiple-sources)
|
||||
- [and prepend annotation headers](#prepend-csv-data-with-annotation-headers)
|
||||
- [from a compressed file](#write-annotated-csv-data-from-a-compressed-file)
|
||||
|
||||
|
||||
### Line protocol
|
||||
|
||||
|
@ -165,20 +163,6 @@ influx write \
|
|||
--url https://example.com/line-protocol-2.txt
|
||||
```
|
||||
|
||||
##### Write line protocol from a compressed file
|
||||
```sh
|
||||
# The influx CLI assumes files with the .gz extension use gzip compression
|
||||
influx write \
|
||||
--bucket example-bucket \
|
||||
--file path/to/line-protocol.txt.gz
|
||||
|
||||
# Specify gzip compression for gzipped files without the .gz extension
|
||||
influx write \
|
||||
--bucket example-bucket \
|
||||
--file path/to/line-protocol.txt.comp \
|
||||
--compression gzip
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### CSV
|
||||
|
@ -263,16 +247,4 @@ influx write \
|
|||
--file path/to/data.csv
|
||||
```
|
||||
|
||||
##### Write annotated CSV data from a compressed file
|
||||
```sh
|
||||
# The influx CLI assumes files with the .gz extension use gzip compression
|
||||
influx write \
|
||||
--bucket example-bucket \
|
||||
--file path/to/data.csv.gz
|
||||
|
||||
# Specify gzip compression for gzipped files without the .gz extension
|
||||
influx write \
|
||||
--bucket example-bucket \
|
||||
--file path/to/data.csv.comp \
|
||||
--compression gzip
|
||||
```
|
||||
|
|
|
@ -3,7 +3,7 @@ title: influxd inspect export-lp
|
|||
description: >
|
||||
The `influxd inspect export-lp` command exports all time series data in a bucket
|
||||
as line protocol.
|
||||
influxdb/v2.0/tags: [inspect, export]
|
||||
influxdb/v2.0/tags: [inspect]
|
||||
menu:
|
||||
influxdb_2_0_ref:
|
||||
parent: influxd inspect
|
||||
|
@ -19,56 +19,29 @@ influxd inspect export-lp [flags]
|
|||
```
|
||||
|
||||
## Flags
|
||||
| Flag | | Description | Input type |
|
||||
|:---- |:--- |:----------- |:----------:|
|
||||
| | `--bucket-id` | ({{< req >}}) Bucket ID | string |
|
||||
| | `--compress` | Compress output with GZIP | |
|
||||
| | `--end` | End time to export (RFC3339 format) | string |
|
||||
| | `--engine-path` | ({{< req >}}) Path to persistent InfluxDB engine files | string |
|
||||
| `-h` | `--help` | Help for the `export-lp` command. | |
|
||||
| | `--log-level` | Log-level (`debug`, `info` _(default)_, or `error`) | string |
|
||||
| | `--measurement` | Measurement name(s) to export | strings |
|
||||
| | `--output-path` | ({{< req >}}) Output path (file path or stdout _(`-`)_) | string |
|
||||
| | `--start` | Start time to export (RFC3339 format) | string |
|
||||
| Flag | | Description | Input type |
|
||||
|:---- |:--- |:----------- |:----------:|
|
||||
| | `--bucket-id` | ({{< req >}}) Bucket ID | string |
|
||||
| | `--compress` | Compress output with GZIP | |
|
||||
| | `--end` | End time to export (RFC3339 format) | string |
|
||||
| | `--engine-path` | ({{< req >}}) Path to persistent InfluxDB engine files | string |
|
||||
| `-h` | `--help` | Help for the `export-lp` command. | |
|
||||
| | `--log-level` | Log-level (`debug`, `info` _(default)_, or `error`) | string |
|
||||
| | `--measurement` | Measurement name(s) to export | strings |
|
||||
| | `--output-path` | ({{< req >}}) Output file path | string |
|
||||
| | `--start` | Start time to export (RFC3339 format) | string |
|
||||
|
||||
## Examples
|
||||
|
||||
- [Export all data in a bucket as line protocol](#export-all-data-in-a-bucket-as-line-protocol)
|
||||
- [Export data in measurements as line protocol](#export-data-in-measurements-as-line-protocol)
|
||||
- [Export data in specified time range as line protocol](#export-data-in-specified-time-range-as-line-protocol)
|
||||
|
||||
##### Export all data in a bucket as line protocol
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[To a file](#)
|
||||
[To stdout](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--output-path path/to/export.lp
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--output-path -
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
##### Export data in measurements as line protocol
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[To a file](#)
|
||||
[To stdout](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
# Export a single measurement
|
||||
influxd inspect export-lp \
|
||||
|
@ -84,33 +57,8 @@ influxd inspect export-lp \
|
|||
--measurement example-measurement-1 example-measurement-2 \
|
||||
--output-path path/to/export.lp
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
# Export a single measurement
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--measurement example-measurement \
|
||||
--output-path -
|
||||
|
||||
# Export multiple measurements
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--measurement example-measurement-1 example-measurement-2 \
|
||||
--output-path -
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
##### Export data in specified time range as line protocol
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[To a file](#)
|
||||
[To stdout](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
|
@ -119,15 +67,3 @@ influxd inspect export-lp \
|
|||
--end 2021-01-31T23:59:59Z \
|
||||
--output-path path/to/export.lp
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```sh
|
||||
influxd inspect export-lp \
|
||||
--bucket-id 12ab34cd56ef \
|
||||
--engine-path ~/.influxdbv2/engine \
|
||||
--start 2021-01-01T00:00:00Z \
|
||||
--end 2021-01-31T23:59:59Z \
|
||||
--output-path -
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
|
|
@ -98,19 +98,15 @@ To configure InfluxDB, use the following configuration options when starting the
|
|||
- [e2e-testing](#e2e-testing)
|
||||
- [engine-path](#engine-path)
|
||||
- [http-bind-address](#http-bind-address)
|
||||
- [http-idle-timeout](#http-idle-timeout)
|
||||
- [http-read-header-timeout](#http-read-header-timeout)
|
||||
- [http-read-timeout](#http-read-timeout)
|
||||
- [http-write-timeout](#http-write-timeout)
|
||||
- [influxql-max-select-buckets](#influxql-max-select-buckets)
|
||||
- [influxql-max-select-point](#influxql-max-select-point)
|
||||
- [influxql-max-select-series](#influxql-max-select-series)
|
||||
- [log-level](#log-level)
|
||||
- [metrics-disabled](#metrics-disabled)
|
||||
- [nats-max-payload-bytes](#nats-max-payload-bytes)
|
||||
- [nats-port](#nats-port)
|
||||
- [new-meta-store](#new-meta-store)
|
||||
- [new-meta-store-read-only](#new-meta-store-read-only)
|
||||
- [no-tasks](#no-tasks)
|
||||
- [pprof-disabled](#pprof-disabled)
|
||||
- [query-concurrency](#query-concurrency)
|
||||
- [query-initial-memory-bytes](#query-initial-memory-bytes)
|
||||
- [query-max-memory-bytes](#query-max-memory-bytes)
|
||||
|
@ -136,7 +132,6 @@ To configure InfluxDB, use the following configuration options when starting the
|
|||
- [storage-validate-keys](#storage-validate-keys)
|
||||
- [storage-wal-fsync-delay](#storage-wal-fsync-delay)
|
||||
- [store](#store)
|
||||
- [testing-always-allow-setup](#testing-always-allow-setup)
|
||||
- [tls-cert](#tls-cert)
|
||||
- [tls-key](#tls-key)
|
||||
- [tls-min-version](#tls-min-version)
|
||||
|
@ -393,214 +388,6 @@ http-bind-address = ":8086"
|
|||
|
||||
---
|
||||
|
||||
### http-idle-timeout
|
||||
Maximum duration the server should keep established connections alive while waiting for new requests.
|
||||
Set to `0` for no timeout.
|
||||
|
||||
**Default:** `3m0s`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--http-idle-timeout` | `INFLUXD_HTTP_IDLE_TIMEOUT` | `http-idle-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --http-idle-timeout=3m0s
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_HTTP_IDLE_TIMEOUT=3m0s
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
http-idle-timeout: 3m0s
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
http-idle-timeout = "3m0s"
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"http-idle-timeout": "3m0s"
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### http-read-header-timeout
|
||||
Maximum duration the server should try to read HTTP headers for new requests.
|
||||
Set to `0` for no timeout.
|
||||
|
||||
**Default:** `10s`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--http-read-header-timeout` | `INFLUXD_HTTP_READ_HEADER_TIMEOUT` | `http-read-header-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --http-read-header-timeout=10s
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_HTTP_READ_HEADER_TIMEOUT=10s
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
http-read-header-timeout: 10s
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
http-read-header-timeout = "10s"
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"http-read-header-timeout": "10s"
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### http-read-timeout
|
||||
Maximum duration the server should try to read the entirety of new requests.
|
||||
Set to `0` for no timeout.
|
||||
|
||||
**Default:** `0`
|
||||
|
||||
{{% note %}}
|
||||
#### Set timeouts specific to your workload
|
||||
Although no `http-read-timeout` is set by default, we **strongly recommend**
|
||||
setting a timeout specific to your workload.
|
||||
HTTP timeouts protect against large amounts of open connections that could
|
||||
potentially hurt performance.
|
||||
{{% /note %}}
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--http-read-timeout` | `INFLUXD_HTTP_READ_TIMEOUT` | `http-read-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --http-read-timeout=10s
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_HTTP_READ_TIMEOUT=10s
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
http-read-timeout: 10s
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
http-read-timeout = "10s"
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"http-read-timeout": "10s"
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### http-write-timeout
|
||||
Maximum duration the server should spend processing and responding to write requests.
|
||||
Set to `0` for no timeout.
|
||||
|
||||
**Default:** `0`
|
||||
|
||||
{{% note %}}
|
||||
#### Set timeouts specific to your workload
|
||||
Although no `http-write-timeout` is set by default, we **strongly recommend**
|
||||
setting a timeout specific to your workload.
|
||||
HTTP timeouts protect against large amounts of open connections that could
|
||||
potentially hurt performance.
|
||||
{{% /note %}}
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--http-write-timeout` | `INFLUXD_HTTP_WRITE_TIMEOUT` | `http-write-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --http-write-timeout=10s
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_HTTP_WRITE_TIMEOUT=10s
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
http-write-timeout: 10s
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
http-write-timeout = "10s"
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"http-write-timeout": "10s"
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### influxql-max-select-buckets
|
||||
Maximum number of group by time buckets a `SELECT` statement can create.
|
||||
`0` allows an unlimited number of buckets.
|
||||
|
@ -795,53 +582,6 @@ log-level = "info"
|
|||
|
||||
---
|
||||
|
||||
### metrics-disabled
|
||||
Disable the HTTP `/metrics` endpoint which exposes internal InfluxDB metrics.
|
||||
|
||||
**Default:** `false`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--metrics-disabled` | `INFLUXD_METRICS_DISABLED` | `metrics-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --metrics-disabled
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_METRICS_DISABLED=true
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
metrics-disabled: true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
metrics-disabled = true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"metrics-disabled": true
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### nats-max-payload-bytes
|
||||
Maximum number of bytes allowed in a NATS message payload.
|
||||
|
||||
|
@ -936,6 +676,102 @@ nats-port = -1
|
|||
|
||||
---
|
||||
|
||||
### new-meta-store
|
||||
Enable the new meta store.
|
||||
|
||||
**Default:** `false`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--new-meta-store` | `INFLUXD_NEW_META_STORE` | `new-meta-store` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --new-meta-store
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_NEW_META_STORE=true
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
new-meta-store: true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
new-meta-store = true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"new-meta-store": true
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### new-meta-store-read-only
|
||||
Toggle read-only mode for the new meta store.
|
||||
If `true`, reads are duplicated between old and new meta stores
|
||||
(if [new meta store](#new-meta-store) is enabled).
|
||||
|
||||
**Default:** `true`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--new-meta-store-read-only` | `INFLUXD_NEW_META_STORE_READ_ONLY` | `new-meta-store-read-only` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --new-meta-store-read-only
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_NEW_META_STORE_READ_ONLY=true
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
new-meta-store-read-only: true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
new-meta-store-read-only = true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"new-meta-store-read-only": true
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### no-tasks
|
||||
Disable the task scheduler.
|
||||
If problematic tasks prevent InfluxDB from starting, use this option to start
|
||||
|
@ -985,54 +821,6 @@ no-tasks = true
|
|||
|
||||
---
|
||||
|
||||
### pprof-disabled
|
||||
Disable the `/debug/pprof` HTTP endpoint.
|
||||
This endpoint provides runtime profiling data and can be helpful when debugging.
|
||||
|
||||
**Default:** `false`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:-------------------|:-------------------------|:------------------|
|
||||
| `--pprof-disabled` | `INFLUXD_PPROF_DISABLED` | `pprof-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --pprof-disabled
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_PPROF_DISABLED=true
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
pprof-disabled: true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
pprof-disabled = true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"pprof-disabled": true
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### query-concurrency
|
||||
Number of queries allowed to execute concurrently.
|
||||
|
||||
|
@ -2249,54 +2037,6 @@ store = "bolt"
|
|||
|
||||
---
|
||||
|
||||
### testing-always-allow-setup
|
||||
Ensures the `/api/v2/setup` endpoint always returns `true` to allow onboarding.
|
||||
This configuration option is primarily used in continuous integration tests.
|
||||
|
||||
**Default:** `false`
|
||||
|
||||
| influxd flag | Environment variable | Configuration key |
|
||||
|:------------ |:-------------------- |:----------------- |
|
||||
| `--testing-always-allow-setup` | `INFLUXD_TESTING_ALWAYS_ALLOW_SETUP` | `testing-always-allow-setup` |
|
||||
|
||||
###### influxd flag
|
||||
```sh
|
||||
influxd --testing-always-allow-setup
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_TESTING_ALWAYS_ALLOW_SETUP=true
|
||||
```
|
||||
|
||||
###### Configuration file
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[YAML](#)
|
||||
[TOML](#)
|
||||
[JSON](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```yml
|
||||
testing-always-allow-setup: true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```toml
|
||||
testing-always-allow-setup = true
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```json
|
||||
{
|
||||
"testing-always-allow-setup": true
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
---
|
||||
|
||||
### tls-cert
|
||||
Path to TLS certificate file.
|
||||
Requires the [`tls-key`](#tls-key) to be set.
|
||||
|
|
|
@ -161,7 +161,7 @@ Define a custom polygon region using a record containing the following propertie
|
|||
```
|
||||
|
||||
## GIS geometry definitions
|
||||
Many functions in the Geo package operate on data using geographic information system (GIS) data.
|
||||
Many functions in the Geo package manipulate data based on geographic information system (GIS) data.
|
||||
Define GIS geometry using the following:
|
||||
|
||||
- Any [region type](#region-definitions) _(typically [point](#point))_
|
||||
|
|
|
@ -65,7 +65,7 @@ include a table with the following columns:
|
|||
|
||||
### operator
|
||||
The `operator` profiler output statistics about each operation in a query.
|
||||
[Operations executed in the storage tier](/influxdb/v2.0/query-data/optimize-queries/#start-queries-with-pushdowns)
|
||||
[Operations executed in the storage tier](/influxdb/v2.0/query-data/optimize-queries/#start-queries-with-pushdown-functions)
|
||||
return as a single operation.
|
||||
When the `operator` profile is enabled, results returned by [`yield()`](/influxdb/v2.0/reference/flux/stdlib/built-in/outputs/yield/)
|
||||
include a table with a row for each operation and the following columns:
|
||||
|
|
|
@ -96,7 +96,7 @@ boolean values are annotated with the `boolean` datatype.
|
|||
### bucket
|
||||
|
||||
A bucket is a named location where time series data is stored.
|
||||
All buckets have a [retention period](#retention-period).
|
||||
All buckets have a retention policy, a duration of time that each data point persists.
|
||||
A bucket belongs to an organization.
|
||||
|
||||
## C
|
||||
|
@ -773,12 +773,6 @@ A tuple of named values represented using a record type.
|
|||
|
||||
Regular expressions (regex or regexp) are patterns used to match character combinations in strings.
|
||||
|
||||
### retention period
|
||||
The duration of time that a bucket retains data.
|
||||
Points with timestamps older than their bucket's retention period are dropped.
|
||||
|
||||
Related entries: [bucket](#bucket), [shard group duration](#shard-group-duration)
|
||||
|
||||
<!--### replication factor
|
||||
|
||||
The attribute of the retention policy that determines how many copies of the data are stored in the cluster. InfluxDB replicates data across N data nodes, where N is the replication factor.
|
||||
|
@ -788,16 +782,16 @@ To maintain data availability for queries, the replication factor should be less
|
|||
Data is fully available when the replication factor is greater than the number of unavailable data nodes.
|
||||
Data may be unavailable when the replication factor is less than the number of unavailable data nodes.
|
||||
Any replication factor greater than two gives you additional fault tolerance and query capacity within the cluster.
|
||||
-->
|
||||
|
||||
### retention policy (RP)
|
||||
Retention policy is an InfluxDB 1.x concept that represents the duration of time
|
||||
that each data point in the retention policy persists.
|
||||
The InfluxDB 2.x equivalent is [retention period](#retention-period).
|
||||
For more information about retention policies, see the
|
||||
[latest 1.x documentation](/{{< latest "influxdb" "v1" >}}/concepts/glossary/#retention-policy-rp).
|
||||
|
||||
Related entries: [retention period](#retention-period),
|
||||
Retention policy is a duration of time that each data point persists. Retention policies are specified in a bucket.
|
||||
|
||||
<!--Retention polices describe how many copies of the data is stored in the cluster (replication factor), and the time range covered by shard groups (shard group duration). Retention policies are unique per bucket.
|
||||
|
||||
Related entries: [duration](#duration), [measurement](#measurement), [replication factor](#replication-factor), [series](#series), [shard duration](#shard-duration), [tag set](#tag-set)
|
||||
|
||||
-->
|
||||
|
||||
### RFC3339 timestamp
|
||||
A timestamp that uses the human readable DateTime format proposed in
|
||||
|
@ -909,50 +903,36 @@ Service input plugins listen on a socket for known protocol inputs, or apply the
|
|||
|
||||
Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin)
|
||||
|
||||
### shard
|
||||
<!--### shard
|
||||
|
||||
A shard contains encoded and compressed data for a specific set of [series](#series).
|
||||
A shard consists of one or more [TSM files](#tsm-time-structured-merge-tree) on disk.
|
||||
All points in a series in a given shard group are stored in the same shard (TSM file) on disk.
|
||||
A shard belongs to a single [shard group](#shard-group).
|
||||
A shard contains encoded and compressed data. Shards are represented by a TSM file on disk.
|
||||
Every shard belongs to one and only one shard group.
|
||||
Multiple shards may exist in a single shard group.
|
||||
Each shard contains a specific set of series.
|
||||
All points falling on a given series in a given shard group will be stored in the same shard (TSM file) on disk.
|
||||
|
||||
For more information, see [Shards and shard groups (OSS)](/influxdb/v2.0/reference/internals/shards/).
|
||||
Related entries: [series](#series), [shard duration](#shard-duration), [shard group](#shard-group), [tsm](#tsm-time-structured-merge-tree)
|
||||
|
||||
Related entries: [series](#series), [shard duration](#shard-duration),
|
||||
[shard group](#shard-group), [tsm](#tsm-time-structured-merge-tree)
|
||||
|
||||
### shard group
|
||||
|
||||
Shard groups are logical containers for shards organized by [bucket](#bucket).
|
||||
Every bucket with data has at least one shard group.
|
||||
A shard group contains all shards with data for the time interval covered by the shard group.
|
||||
The interval spanned by each shard group is the [shard group duration](#shard-group-duration).
|
||||
|
||||
For more information, see [Shards and shard groups (OSS)](/influxdb/v2.0/reference/internals/shards/).
|
||||
|
||||
Related entries: [bucket](#bucket), [retention period](#retention-period),
|
||||
[series](#series), [shard](#shard), [shard duration](#shard-duration)
|
||||
|
||||
### shard group duration
|
||||
|
||||
The duration of time or interval that each [shard group](#shard-group) covers. Set the `shard-group-duration` for each [bucket](#bucket).
|
||||
|
||||
For more information, see:
|
||||
|
||||
- [Shards and shard groups (OSS)](/influxdb/v2.0/reference/internals/shards/)
|
||||
- [Manage buckets](/influxdb/v2.0/organizations/buckets/)
|
||||
### shard duration
|
||||
|
||||
The shard duration determines how much time each shard group spans.
|
||||
The specific interval is determined by the `SHARD DURATION` of the retention policy.
|
||||
<!-- See [Retention Policy management](/{{< latest "influxdb" "v1" >}}/query_language/manage-database/#retention-policy-management) for more information.
|
||||
|
||||
For example, given a retention policy with `SHARD DURATION` set to `1w`, each shard group will span a single week and contain all points with timestamps in that week.
|
||||
|
||||
Related entries: [database](#database), [retention policy](#retention-policy-rp), [series](/#series), [shard](#shard), [shard group](#shard-group)
|
||||
|
||||
### shard group
|
||||
|
||||
Shard groups are logical containers for shards.
|
||||
Shard groups are organized by time and retention policy.
|
||||
Every retention policy that contains data has at least one associated shard group.
|
||||
A given shard group contains all shards with data for the interval covered by the shard group.
|
||||
The interval spanned by each shard group is the shard duration.
|
||||
|
||||
Related entries: [database](#database), [retention policy](#retention-policy-rp), [series](/#series), [shard](#shard), [shard duration](#shard-duration)
|
||||
|
||||
-->
|
||||
|
||||
### Single Stat
|
||||
|
@ -1075,8 +1055,8 @@ Related entries: [point](#point), [unix timestamp](#unix-timestamp), [RFC3339 ti
|
|||
Tokens (or authentication tokens) verify user and organization permissions in InfluxDB.
|
||||
There are different types of authentication tokens:
|
||||
|
||||
- **Operator token:** grants full read and write access to all resources in **all organizations in InfluxDB OSS 2.x**.
|
||||
_InfluxDB Cloud does not support Operator tokens._
|
||||
- **Admin token:** grants full read and write access to all resources in **all organizations in InfluxDB OSS 2.x**.
|
||||
_InfluxDB Cloud does not support Admin tokens._
|
||||
- **All-Access token:** grants full read and write access to all resources in an organization.
|
||||
- **Read/Write token:** grants read or write access to specific resources in an organization.
|
||||
|
||||
|
|
|
@ -1,246 +0,0 @@
|
|||
---
|
||||
title: InfluxDB file system layout
|
||||
description: >
|
||||
The InfluxDB file system layout depends on the operating system, package manager,
|
||||
or containerization platform used to install InfluxDB.
|
||||
weight: 102
|
||||
menu:
|
||||
influxdb_2_0_ref:
|
||||
name: File system layout
|
||||
parent: InfluxDB internals
|
||||
influxdb/v2.0/tags: [storage, internals]
|
||||
---
|
||||
|
||||
The InfluxDB file system layout depends on the operating system, installation method,
|
||||
or containerization platform used to install InfluxDB.
|
||||
|
||||
- [InfluxDB file structure](#influxdb-file-structure)
|
||||
- [File system layout](#file-system-layout)
|
||||
|
||||
## InfluxDB file structure
|
||||
The InfluxDB file structure includes the following:
|
||||
|
||||
#### Engine path
|
||||
Directory path to the [storage engine](/{{< latest "influxdb" >}}/reference/internals/storage-engine/),
|
||||
where InfluxDB stores time series data, includes the following directories:
|
||||
|
||||
- **data**: stores Time-Structured Merge Tree (TSM) files
|
||||
- **wal**: stores Write Ahead Log (WAL) files.
|
||||
|
||||
To customize this path, use the [engine-path](/influxdb/v2.0/reference/config-options/#engine-path)
|
||||
configuration option.
|
||||
|
||||
#### Bolt path
|
||||
File path to the [Boltdb](https://github.com/boltdb/bolt) database, a file-based
|
||||
key-value store for non-time series data, such as InfluxDB users, dashboards, tasks, etc.
|
||||
To customize this path, use the [bolt-path](/influxdb/v2.0/reference/config-options/#bolt-path)
|
||||
configuration option.
|
||||
|
||||
#### Configs path
|
||||
File path to [`influx` CLI connection configurations](/influxdb/v2.0/reference/cli/influx/config/) (configs).
|
||||
To customize this path, use the `--configs-path` flag with `influx` CLI commands.
|
||||
|
||||
#### InfluxDB configuration files
|
||||
Some operating systems and package managers store a default InfluxDB (`influxd`) configuration file on disk.
|
||||
For more information about using InfluxDB configuration files, see
|
||||
[Configuration options](/influxdb/v2.0/reference/config-options/).
|
||||
|
||||
## File system layout
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[macOS](#)
|
||||
[Linux](#)
|
||||
[Windows](#)
|
||||
[Docker](#)
|
||||
[Kubernetes](#)
|
||||
{{% /tabs %}}
|
||||
<!---------------------------- BEGIN MACOS CONTENT ---------------------------->
|
||||
{{% tab-content %}}
|
||||
|
||||
#### macOS default paths
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `~/.influxdbv2/engine/` |
|
||||
| [Bolt path](#bolt-path) | `~/.influxdbv2/influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `~/.influxdbv2/configs` |
|
||||
|
||||
#### macOS file system overview
|
||||
{{% filesystem-diagram %}}
|
||||
- ~/.influxdbv2/
|
||||
- engine/
|
||||
- data/
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal/
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- configs
|
||||
- influxd.bolt
|
||||
{{% /filesystem-diagram %}}
|
||||
{{% /tab-content %}}
|
||||
<!----------------------------- END MACOS CONTENT ----------------------------->
|
||||
|
||||
<!---------------------------- BEGIN LINUX CONTENT ---------------------------->
|
||||
{{% tab-content %}}
|
||||
When installing InfluxDB on Linux, you can download and install the `influxd` binary,
|
||||
or you can use a package manager.
|
||||
Which installation method you use determines the file system layout.
|
||||
|
||||
- [Installed as a standalone binary](#installed-as-a-standalone-binary)
|
||||
- [Installed as a package](#installed-as-a-package)
|
||||
|
||||
### Installed as a standalone binary
|
||||
|
||||
#### Linux default paths (standalone binary)
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `~/.influxdbv2/engine/` |
|
||||
| [Bolt path](#bolt-path) | `~/.influxdbv2/influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `~/.influxdbv2/configs` |
|
||||
|
||||
#### Linux file system overview (standalone binary)
|
||||
{{% filesystem-diagram %}}
|
||||
- ~/.influxdbv2/
|
||||
- engine/
|
||||
- data/
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal/
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- configs
|
||||
- influxd.bolt
|
||||
{{% /filesystem-diagram %}}
|
||||
|
||||
### Installed as a package
|
||||
InfluxDB 2.0 supports **.deb-** and **.rpm-based** Linux package managers.
|
||||
The file system layout is the same with each.
|
||||
|
||||
#### Linux default paths (package)
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `/var/lib/influxdb/engine/` |
|
||||
| [Bolt path](#bolt-path) | `/var/lib/influxdb/influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `/var/lib/influxdb/configs` |
|
||||
| [Default config file path](#influxdb-configuration-files) | `/etc/influxdb/config.toml` |
|
||||
|
||||
#### Linux file system overview (package)
|
||||
{{% filesystem-diagram %}}
|
||||
- /var/lib/influxdb/
|
||||
- engine/
|
||||
- data/
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal/
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- configs
|
||||
- influxd.bolt
|
||||
- /etc/influxdb/
|
||||
- config.toml _<span style="opacity:.4">(influxd configuration file)</span>_
|
||||
{{% /filesystem-diagram %}}
|
||||
{{% /tab-content %}}
|
||||
<!----------------------------- END LINUX CONTENT ----------------------------->
|
||||
|
||||
<!--------------------------- BEGIN WINDOWS CONTENT --------------------------->
|
||||
{{% tab-content %}}
|
||||
|
||||
#### Windows default paths
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `%USERPROFILE%\.influxdbv2\engine\` |
|
||||
| [Bolt path](#bolt-path) | `%USERPROFILE%\.influxdbv2\influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `%USERPROFILE%\.influxdbv2\configs` |
|
||||
|
||||
#### Windows file system overview
|
||||
{{% filesystem-diagram %}}
|
||||
- %USERPROFILE%\\.influxdbv2\
|
||||
- engine\
|
||||
- data\
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal\
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- configs
|
||||
- influxd.bolt
|
||||
{{% /filesystem-diagram %}}
|
||||
{{% /tab-content %}}
|
||||
<!---------------------------- END WINDOWS CONTENT ---------------------------->
|
||||
|
||||
<!---------------------------- BEGIN DOCKER CONTENT --------------------------->
|
||||
{{% tab-content %}}
|
||||
InfluxDB Docker images are available from both [Dockerhub](https://hub.docker.com/_/influxdb)
|
||||
and [Quay.io](https://quay.io/repository/influxdb/influxdb?tab=tags).
|
||||
Each has a unique InfluxDB file system layout.
|
||||
|
||||
- [Dockerhub](#dockerhub)
|
||||
- [Quay.io](#quayio)
|
||||
|
||||
### Dockerhub
|
||||
|
||||
{{% note %}}
|
||||
The InfluxDB Dockerhub image uses `/var/lib/influxdb2` instead of `/var/lib/influxdb`
|
||||
so you can easily mount separate volumes for InfluxDB 1.x and 2.x data during the
|
||||
[upgrade process](/influxdb/v2.0/upgrade/v1-to-v2/docker/).
|
||||
{{% /note %}}
|
||||
|
||||
#### Dockerhub default paths
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `/var/lib/influxdb2/engine/` |
|
||||
| [Bolt path](#bolt-path) | `/var/lib/influxdb2/influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `/etc/influxdb2/configs` |
|
||||
|
||||
#### Dockerhub file system overview
|
||||
{{% filesystem-diagram %}}
|
||||
- /var/lib/influxdb2/
|
||||
- engine/
|
||||
- data/
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal/
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- influxd.bolt
|
||||
- /etc/influxdb2/
|
||||
- configs
|
||||
{{% /filesystem-diagram %}}
|
||||
|
||||
### Quay.io
|
||||
|
||||
#### Quay default paths
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `/root/.influxdbv2/engine/` |
|
||||
| [Bolt path](#bolt-path) | `/root/.influxdbv2/influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `/root/.influxdbv2/configs` |
|
||||
|
||||
#### Quay file system overview
|
||||
{{% filesystem-diagram %}}
|
||||
- /root/.influxdbv2/
|
||||
- engine/
|
||||
- data/
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal/
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- configs
|
||||
- influxd.bolt
|
||||
{{% /filesystem-diagram %}}
|
||||
{{% /tab-content %}}
|
||||
<!----------------------------- END DOCKER CONTENT ---------------------------->
|
||||
|
||||
<!-------------------------- BEGIN KUBERNETES CONTENT ------------------------->
|
||||
{{% tab-content %}}
|
||||
#### Kubernetes default paths
|
||||
| Path | Default |
|
||||
|:---- |:------- |
|
||||
| [Engine path](#engine-path) | `/var/lib/influxdb2/engine/` |
|
||||
| [Bolt path](#bolt-path) | `/var/lib/influxdb2/influxd.bolt` |
|
||||
| [Configs path](#configs-path) | `/etc/influxdb2/configs` |
|
||||
|
||||
#### Kubernetes file system overview
|
||||
{{% filesystem-diagram %}}
|
||||
- /var/lib/influxdb2/
|
||||
- engine/
|
||||
- data/
|
||||
- _<span style="opacity:.4">TSM directories and files</span>_
|
||||
- wal/
|
||||
- _<span style="opacity:.4">WAL directories and files</span>_
|
||||
- influxd.bolt
|
||||
- /etc/influxdb2/
|
||||
- configs
|
||||
{{% /filesystem-diagram %}}
|
||||
{{% /tab-content %}}
|
||||
<!--------------------------- END KUBERNETES CONTENT -------------------------->
|
||||
{{< /tabs-wrapper >}}
|
|
@ -1,145 +0,0 @@
|
|||
---
|
||||
title: InfluxDB shards and shard groups
|
||||
description: >
|
||||
Learn the relationships between buckets, shards, and shard groups.
|
||||
InfluxDB organizes time series data into **shards** when storing data to disk.
|
||||
Shards are grouped into **shard groups**.
|
||||
menu:
|
||||
influxdb_2_0_ref:
|
||||
name: Shards & shard groups
|
||||
parent: InfluxDB internals
|
||||
weight: 102
|
||||
influxdb/v2.0/tags: [storage, internals]
|
||||
related:
|
||||
- /influxdb/v2.0/reference/internals/storage-engine/
|
||||
- /influxdb/v2.0/organizations/buckets/
|
||||
- /influxdb/v2.0/reference/cli/influx/bucket/
|
||||
---
|
||||
|
||||
InfluxDB organizes time series data into **shards** when storing data to disk.
|
||||
Shards are grouped into **shard groups**.
|
||||
Learn the relationships between buckets, shards, and shard groups.
|
||||
|
||||
- [Shards](#shards)
|
||||
- [Shard groups](#shard-groups)
|
||||
- [Shard group duration](#shard-group-duration)
|
||||
- [Shard group diagram](#shard-group-diagram)
|
||||
- [Shard life-cycle](#shard-life-cycle)
|
||||
- [Shard precreation](#shard-precreation)
|
||||
- [Shard writes](#shard-writes)
|
||||
- [Shard compaction](#shard-compaction)
|
||||
- [Shard deletion](#shard-deletion)
|
||||
|
||||
## Shards
|
||||
A shard contains encoded and compressed time series data for a given time range
|
||||
defined by the [shard group duration](#shard-group-duration).
|
||||
All points in a [series](#series) within the specified shard group duration are stored in the same shard.
|
||||
A single shard contains multiple series, one or more [TSM files](#tsm-time-structured-merge-tree) on disk,
|
||||
and belongs to a [shard group](#shard-groups).
|
||||
|
||||
## Shard groups
|
||||
A shard group belongs to an InfluxDB [bucket](/influxdb/v2.0/reference/glossary/#bucket) and contains time series data for a specific time range defined by
|
||||
the [shard group duration](#shard-group-duration).
|
||||
|
||||
{{% note %}}
|
||||
In **InfluxDB OSS**, a shard group typically contains only a single shard.
|
||||
In an **InfluxDB Enterprise 1.x cluster**, shard groups contain multiple shards
|
||||
distributed across multiple data nodes.
|
||||
{{% /note %}}
|
||||
|
||||
### Shard group duration
|
||||
The **shard group duration** specifies the time range for each shard group and determines how often to create a new shard group.
|
||||
By default, InfluxDB sets the shard group duration according to
|
||||
the [retention period](/influxdb/v2.0/reference/glossary/#retention-period)
|
||||
of the bucket:
|
||||
|
||||
| Bucket retention period | Default shard group duration |
|
||||
|:----------------------- | ----------------------------:|
|
||||
| less than 2 days | 1h |
|
||||
| between 2 days and 6 months | 1d |
|
||||
| greater than 6 months | 7d |
|
||||
|
||||
##### Shard group duration configuration options
|
||||
To configure a custom bucket shard group duration, use the `--shard-group-duration`
|
||||
flag with the [`influx bucket create`](/influxdb/v2.0/reference/cli/influx/bucket/create/#create-a-custom-shard-group-duration)
|
||||
and [`influx bucket update`](/influxdb/v2.0/reference/cli/influx/bucket/update//#update-the-shard-group-duration-of-a-bucket) commands.
|
||||
|
||||
{{% note %}}
|
||||
Shard group durations must be shorter than the bucket's retention period.
|
||||
{{% /note %}}
|
||||
|
||||
To view your bucket's shard group duration, use the
|
||||
[`influx bucket list` command](/influxdb/v2.0/reference/cli/influx/bucket/list/).
|
||||
|
||||
### Shard group diagram
|
||||
The following diagram represents a **bucket** with a **4d retention period**
|
||||
and a **1d shard group duration**:
|
||||
|
||||
---
|
||||
|
||||
{{< html-diagram/shards >}}
|
||||
|
||||
---
|
||||
|
||||
## Shard life-cycle
|
||||
|
||||
### Shard precreation
|
||||
The InfluxDB **shard precreation service** pre-creates shards with future start
|
||||
and end times for each shard group based on the shard group duration.
|
||||
|
||||
The precreator service does not pre-create shards for past time ranges.
|
||||
When backfilling historical data, InfluxDB creates shards for past time ranges as needed,
|
||||
resulting in temporarily lower write throughput.
|
||||
|
||||
##### Shard precreation-related configuration settings
|
||||
- [`storage-shard-precreator-advance-period`](/influxdb/v2.0/reference/config-options/#storage-shard-precreator-advance-period)
|
||||
- [`storage-shard-precreator-check-interval`](/influxdb/v2.0/reference/config-options/#storage-shard-precreator-check-interval)
|
||||
|
||||
### Shard writes
|
||||
InfluxDB writes time series data to un-compacted or "hot" shards.
|
||||
When a shard is no longer actively written to, InfluxDB [compacts](#shard-compaction) shard data, resulting in a "cold" shard.
|
||||
|
||||
Typically, InfluxDB writes data to the most recent shard group, but when backfilling
|
||||
historical data, InfluxDB writes to older shards that must first be un-compacted.
|
||||
When the backfill is complete, InfluxDB re-compacts the older shards.
|
||||
|
||||
### Shard compaction
|
||||
InfluxDB compacts shards at regular intervals to compress time series data and optimize disk usage.
|
||||
InfluxDB uses the following four compaction levels:
|
||||
|
||||
- **Level 1 (L1):** InfluxDB flushes all newly written data held in an in-memory cache to disk.
|
||||
- **Level 2 (L2):** InfluxDB compacts up to eight L1-compacted files into one or more L2 files by
|
||||
combining multiple blocks containing the same series into fewer blocks in one or more new files.
|
||||
- **Level 3 (L3):** InfluxDB iterates over L2-compacted file blocks (over a certain size)
|
||||
and combines multiple blocks containing the same series into one block in a new file.
|
||||
- **Level 4 (L4):** **Full compaction**—InfluxDB iterates over L3-compacted file blocks
|
||||
and combines multiple blocks containing the same series into one block in a new file.
|
||||
|
||||
##### Shard compaction-related configuration settings
|
||||
- [`storage-compact-full-write-cold-duration`](/influxdb/v2.0/reference/config-options/#storage-compact-full-write-cold-duration)
|
||||
- [`storage-compact-throughput-burst`](/influxdb/v2.0/reference/config-options/#storage-compact-throughput-burst)
|
||||
- [`storage-max-concurrent-compactions`](/influxdb/v2.0/reference/config-options/#storage-max-concurrent-compactions)
|
||||
- [`storage-max-index-log-file-size`](/influxdb/v2.0/reference/config-options/#storage-max-index-log-file-size)
|
||||
- [`storage-series-file-max-concurrent-snapshot-compactions`](/influxdb/v2.0/reference/config-options/#storage-series-file-max-concurrent-snapshot-compactions)
|
||||
- [`storage-series-file-max-concurrent-snapshot-compactions`](/influxdb/v2.0/reference/config-options/#storage-series-file-max-concurrent-snapshot-compactions)
|
||||
|
||||
## Shard deletion
|
||||
The InfluxDB **retention enforcement service** routinely checks for shard groups
|
||||
older than their bucket's retention period.
|
||||
Once the start time of a shard group is beyond the bucket's retention period,
|
||||
InfluxDB deletes the shard group and associated shards and TSM files.
|
||||
|
||||
In buckets with an infinite retention period, shards remain on disk indefinitely.
|
||||
|
||||
{{% note %}}
|
||||
#### InfluxDB only deletes cold shards
|
||||
InfluxDB only deletes **cold** shards.
|
||||
If backfilling data beyond a bucket's retention period, the backfilled data will
|
||||
remain on disk until the following occurs:
|
||||
|
||||
1. The shard returns to a cold state.
|
||||
2. The retention enforcement service deletes the shard group.
|
||||
{{% /note %}}
|
||||
|
||||
##### Retention enforcement-related configuration settings
|
||||
- [`storage-retention-check-interval`](/influxdb/v2.0/reference/config-options/#storage-retention-check-interval)
|
|
@ -2,7 +2,7 @@
|
|||
title: InfluxDB storage engine
|
||||
description: >
|
||||
An overview of the InfluxDB storage engine architecture.
|
||||
weight: 101
|
||||
weight: 7
|
||||
menu:
|
||||
influxdb_2_0_ref:
|
||||
name: Storage engine
|
||||
|
|
|
@ -7,7 +7,7 @@ menu:
|
|||
influxdb_2_0_ref:
|
||||
name: System buckets
|
||||
parent: InfluxDB internals
|
||||
weight: 103
|
||||
weight: 101
|
||||
influxdb/v2.0/tags: [buckets]
|
||||
related:
|
||||
- /influxdb/v2.0/monitor-alert/
|
||||
|
|
|
@ -8,98 +8,6 @@ menu:
|
|||
weight: 101
|
||||
---
|
||||
|
||||
## v2.0.5 General Availability [2021-04-27]
|
||||
|
||||
### Windows Support
|
||||
This release includes our initial Windows preview build.
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
#### /debug/vars removed
|
||||
Prior to this release, the `influxd` server would expose profiling information over the `/debug/vars` endpoint.
|
||||
This endpoint was unauthenticated and not used by InfluxDB systems to report diagnostics.
|
||||
For security and clarity, the endpoint has been removed.
|
||||
Use the `/metrics` endpoint to collect system statistics.
|
||||
|
||||
#### `influx transpile` removed
|
||||
The `transpile` command has been removed. Send InfluxQL requests directly to the server via the `/api/v2/query`
|
||||
or `/query` HTTP endpoints.
|
||||
|
||||
#### Default query concurrency changed
|
||||
The default setting for the max number of concurrent Flux queries has been changed from 10 to unlimited (`0`).
|
||||
To limit query concurrency and queue size:
|
||||
|
||||
1. Set the `query-concurrency` config parameter to > 0 when running `influxd` to re-limit the maximum running query count,
|
||||
2. Set the `query-queue-size` config parameter to > 0 to set the max number of queries that can be queued before the
|
||||
server starts rejecting requests.
|
||||
|
||||
#### Prefix for query-controller metrics changed
|
||||
The prefix used for Prometheus metrics from the query controller has changed from `query_control_` to `qc_`.
|
||||
|
||||
### Features
|
||||
- Add [Swift client library](https://github.com/influxdata/influxdb-client-swift)
|
||||
to the **Load Data** section of the InfluxDB UI.
|
||||
- Add [`influx task retry-failed` command](/influxdb/v2.0/reference/cli/influx/task/retry-failed/) to rerun failed runs.
|
||||
- Add [`--compression` option](/influxdb/v2.0/reference/cli/influx/write/#flags)
|
||||
to the `influx write` command to support Gzip inputs.
|
||||
- Add new `influxd` configuration options:
|
||||
- [pprof-disabled](/influxdb/v2.0/reference/config-options/#pprof-disabled)
|
||||
- [metrics-disabled](/influxdb/v2.0/reference/config-options/#metrics-disabled)
|
||||
- [http-read-header-timeout](/influxdb/v2.0/reference/config-options/#http-read-header-timeout)
|
||||
- [http-read-timeout](/influxdb/v2.0/reference/config-options/#http-read-timeout)
|
||||
- [http-write-timeout](/influxdb/v2.0/reference/config-options/#http-write-timeout)
|
||||
- [http-idle-timeout](/influxdb/v2.0/reference/config-options/#http-idle-timeout)
|
||||
- Add `/debug/pprof/all` HTTP endpoint to gather all profiles at once.
|
||||
- Include the InfluxDB 1.x `http.pprof-enabled` configuration option in the 2.0
|
||||
configuration file generated by the [InfluxDB upgrade process](/influxdb/v2.0/upgrade/v1-to-v2/automatic-upgrade/).
|
||||
- Add support for [custom shard group durations](/influxdb/v2.0/reference/cli/influx/bucket/create/#create-a-bucket-with-a-custom-shard-group-duration) on buckets.
|
||||
- Optimize regular expression conditions in InfluxQL subqueries.
|
||||
- Update Telegraf plugins in the InfluxDB UI to include additions and changes from
|
||||
[Telegraf 1.18](/telegraf/v1.18/about_the_project/release-notes-changelog/#v118-2021-3-17).
|
||||
- Display task IDs in the tasks list in the InfluxDB UI.
|
||||
- Write to standard output (`stdout`) when `--output-path -` is passed to [`influxd inspect export-lp`](/influxdb/v2.0/reference/cli/influxd/inspect/export-lp/).
|
||||
- Add `-p, --profilers` flag to [`influx query` command](/influxdb/v2.0/reference/cli/influx/query/)
|
||||
to enable [Flux profilers](/influxdb/v2.0/reference/flux/stdlib/profiler/) on
|
||||
a query executed from the `influx` CLI.
|
||||
- Update InfluxDB OSS UI to match InfluxDB Cloud.
|
||||
- Support disabling concurrency limits in the Flux controller.
|
||||
- Replace unique resource IDs (UI assets, backup shards) with slugs to reduce
|
||||
cardinality of telemetry data.
|
||||
- Standardize HTTP server error log output.
|
||||
- Enable InfluxDB user interface features:
|
||||
- [Band visualization type](/influxdb/v2.0/visualize-data/visualization-types/band/)
|
||||
- [Mosaic visualization type](/influxdb/v2.0/visualize-data/visualization-types/mosaic/)
|
||||
- [Configure axis tick marks](/influxdb/v2.0/visualize-data/visualization-types/graph/#x-axis)
|
||||
- Upload CSV files through the InfluxDB UI
|
||||
- [Edit Telegraf configurations](/influxdb/v2.0/telegraf-configs/update/#edit-the-configuration-file-directly-in-the-ui) in the InfluxDB UI
|
||||
- [Legend orientation options](/influxdb/v2.0/visualize-data/visualization-types/graph/#legend)
|
||||
- [Refresh a single dashboard cell](/influxdb/v2.0/visualize-data/dashboards/control-dashboard/#refresh-a-single-dashboard-cell)
|
||||
- Upgrade to **Flux v0.113.0**.
|
||||
|
||||
### Bug Fixes
|
||||
- Prevent "do not have an execution context" error when parsing Flux options in tasks.
|
||||
- Fix swagger to match implementation of DBRPs type.
|
||||
- Fix use-after-free bug in series ID iterator.
|
||||
- Fix TSM and WAL segment size check to check against the local `SegmentSize`.
|
||||
- Fix TSM and WAL segment size computing to correctly calculate `totalOldDiskSize`.
|
||||
- Update references to the documentation site to use current URLs.
|
||||
- Fix data race in the TSM engine when inspecting tombstone statistics.
|
||||
- Fix data race in the TSM cache.
|
||||
- Deprecate misleading `retentionPeriodHrs` key in the onboarding API.
|
||||
- Fix Single Stat graphs with thresholds crashing on negative values.
|
||||
- Fix InfluxDB port in Flux function UI examples.
|
||||
- Remove unauthenticated, unsupported `/debug/vars` HTTP endpoint.
|
||||
- Respect 24 hour clock formats in the InfluxDB UI and add more format choices.
|
||||
- Prevent "do not have an execution context" error when parsing Flux options in tasks.
|
||||
- Prevent time field names from being formatted in the Table visualization.
|
||||
- Log error details when `influxd upgrade` fails to migrate databases.
|
||||
- Fix the cipher suite used when TLS strict ciphers are enabled in `influxd`.
|
||||
- Fix parse error in UI for tag filters containing regular expression meta characters.
|
||||
- Prevent concurrent access panic when gathering bolt metrics.
|
||||
- Fix race condition in Flux controller shutdown.
|
||||
- Reduce lock contention when adding new fields and measurements.
|
||||
- Escape dots in community templates hostname regular expression.
|
||||
|
||||
## v2.0.4 General Availability [2021-02-04]
|
||||
|
||||
### Docker
|
||||
|
@ -126,7 +34,7 @@ The startup process automatically generates replacement `tsi1` indexes for shard
|
|||
- Add new [`influxd upgrade`](/influxdb/v2.0/reference/cli/influxd/upgrade/) flag `—overwrite-existing-v2` to overwrite existing files at output paths (instead of aborting).
|
||||
- Add new configuration options:
|
||||
- [`nats-port`](/influxdb/v2.0/reference/config-options/#nats-port)
|
||||
- [`nats-max-payload-bytes`](/influxdb/v2.0/reference/config-options/#nats-max-payload-bytes)
|
||||
- [`nats-max-payload-bytes`](/influxdb/v2.0/reference/config-options/#nats-max-payload-bytes)
|
||||
- Add new commands:
|
||||
- Add [`influxd print-config`](/influxdb/v2.0/reference/cli/influxd/print-config/) to support automated configuration inspection.
|
||||
- Add [`influxd inspect export-lp`](/influxdb/v2.0/reference/cli/influxd/inspect/export-lp/) to extract data in line-protocol format.
|
||||
|
|
|
@ -12,23 +12,23 @@ menu:
|
|||
weight: 103
|
||||
---
|
||||
|
||||
InfluxDB **authentication tokens** ensure secure interaction between users and data.
|
||||
InfluxDB ensures secure interaction between users and data through the use of **authentication tokens**.
|
||||
A token belongs to an organization and identifies InfluxDB permissions within the organization.
|
||||
|
||||
Learn how to create, view, update, or delete an authentication token.
|
||||
|
||||
## Authentication token types
|
||||
|
||||
- [Operator token](#operator-token)
|
||||
- [Admin token](#admin-token)
|
||||
- [All-Access token](#all-access-token)
|
||||
- [Read/Write token](#readwrite-token)
|
||||
|
||||
#### Operator token
|
||||
#### Admin token
|
||||
Grants full read and write access to all resources in **all organizations in InfluxDB OSS 2.x**.
|
||||
|
||||
{{% note %}}
|
||||
Operator tokens are created in the InfluxDB setup process and cannot be created manually.
|
||||
Because Operator tokens have full read and write access to all organizations in the database,
|
||||
Admin tokens are created in the InfluxDB setup process and cannot be created manually.
|
||||
Because Admin tokens have full read and write access to all organizations in the database,
|
||||
we recommend [creating an All-Access token](/influxdb/v2.0/security/tokens/create-token/)
|
||||
for each organization and using those to manage InfluxDB.
|
||||
This helps to prevent accidental interactions across organizations.
|
||||
|
@ -38,6 +38,6 @@ This helps to prevent accidental interactions across organizations.
|
|||
Grants full read and write access to all resources in an organization.
|
||||
|
||||
#### Read/Write token
|
||||
Grants read access, write access, or both to specific buckets in an organization.
|
||||
Grants read or write access to specific resources in an organization.
|
||||
|
||||
{{< children hlevel="h2" >}}
|
||||
|
|
|
@ -23,7 +23,7 @@ Tokens are visible only to the user who created them and stop working when the u
|
|||
{{< nav-icon "disks" >}}
|
||||
|
||||
2. Click **{{< icon "plus" >}} Generate** and select a token type
|
||||
(**Read/Write Token** or **All-Access Token**).
|
||||
(**Read/Write Token** or **All Access Token**).
|
||||
3. In the window that appears, enter a description for your token in the **Description** field.
|
||||
4. If generating a **read/write token**:
|
||||
- Search for and select buckets to read from in the **Read** pane.
|
||||
|
|
|
@ -100,43 +100,18 @@ You can continue to use Kapacitor with InfluxDB OSS 2.0 under the following scen
|
|||
|
||||
### User migration
|
||||
|
||||
`influxd upgrade` migrates existing 1.x users and their permissions **except** the following users:
|
||||
`influxd upgrade` migrates existing 1.x users and their permissions.
|
||||
However, it *does not migrate administrative users*.
|
||||
|
||||
- [1.x admin users](/{{< latest "influxdb" "v1" >}}/administration/authentication_and_authorization/#admin-users)
|
||||
- [1.x non-admin users](/{{< latest "influxdb" "v1" >}}/administration/authentication_and_authorization/#non-admin-users)
|
||||
that have not been granted any privileges
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "Review 1.x user privileges" %}}
|
||||
**To review 1.x users with admin privileges**, run the following against your InfluxDB 1.x instance:
|
||||
|
||||
```sql
|
||||
SHOW USERS
|
||||
```
|
||||
|
||||
Users with `admin` set to `true` will **not** be migrated.
|
||||
|
||||
**To review the specific privileges granted to each 1.x user**, run the following for each user in your InfluxDB 1.x instance:
|
||||
|
||||
```sql
|
||||
SHOW GRANTS FOR "<username>"
|
||||
```
|
||||
|
||||
If no grants appear, the user will **not** be migrated.
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
To review users with admin permissions, in the InfluxDB 1.x CLI, run `show users`.
|
||||
Any users labeled "admin" *will not* be migrated.
|
||||
|
||||
If using an admin user for visualization or Chronograf administrative functions,
|
||||
**create a new read-only user before upgrading**:
|
||||
|
||||
##### Create a read-only 1.x user
|
||||
```sh
|
||||
> CREATE USER <username> WITH PASSWORD '<password>'
|
||||
> GRANT READ ON <database> TO "<username>"
|
||||
```
|
||||
|
||||
InfluxDB 2.0 only grants admin privileges to the primary user set up during the InfluxDB 2.0 upgrade.
|
||||
This provides you with the opportunity to reassess who to grant admin permissions when setting up InfluxDB 2.0.
|
||||
create a new read-only user before upgrading.
|
||||
Admin rights are granted to the primary user created in the InfluxDB 2.0 setup
|
||||
process which runs at the end of the upgrade process.
|
||||
This provides you with the opportunity to re-assess who should be granted
|
||||
admin-level access in your InfluxDB 2.0 setup.
|
||||
|
||||
### Dashboards
|
||||
|
||||
|
|
|
@ -84,7 +84,7 @@ If you're using custom configuration settings in your InfluxDB 1.x instance, do
|
|||
| access-log-path | |
|
||||
| access-log-status-filters | |
|
||||
| write-tracing | |
|
||||
| pprof-enabled | [pprof-disabled](/influxdb/v2.0/reference/config-options/#pprof-disabled) |
|
||||
| pprof-enabled | |
|
||||
| pprof-auth-enabled | |
|
||||
| debug-pprof-enabled | |
|
||||
| ping-auth-enabled | |
|
||||
|
@ -99,7 +99,7 @@ If you're using custom configuration settings in your InfluxDB 1.x instance, do
|
|||
| max-body-size | |
|
||||
| max-concurrent-write-limit | |
|
||||
| max-enqueued-write-limit | |
|
||||
| enqueued-write-timeout | [http-write-timeout](/influxdb/v2.0/reference/config-options/#http-write-timeout) |
|
||||
| enqueued-write-timeout | |
|
||||
| | |
|
||||
| **[logging]** | |
|
||||
| format | |
|
||||
|
@ -160,7 +160,7 @@ _For more information about DBRP mapping, see
|
|||
|
||||
2. **Create a DBRP mapping**
|
||||
Use the [`influx v1 dbrp create` command](/influxdb/v2.0/reference/cli/influx/v1/dbrp/create/)
|
||||
to create a DBRP mapping.
|
||||
command to create a DBRP mapping.
|
||||
Provide the following:
|
||||
|
||||
- database name
|
||||
|
@ -332,7 +332,7 @@ To migrate time series data from your InfluxDB 1.x instance to InfluxDB 2.0:
|
|||
--file /path/to/example-db_example-rp.lp
|
||||
```
|
||||
|
||||
3. Repeat steps 1-2 for each bucket.
|
||||
Repeat this process for each bucket.
|
||||
|
||||
## Migrate continuous queries
|
||||
For information about migrating InfluxDB 1.x continuous queries to InfluxDB 2.0 tasks,
|
||||
|
|
|
@ -37,14 +37,14 @@ influxd upgrade --continuous-query-export-path /path/to/continuous_queries.txt
|
|||
|
||||
**To manually output continuous queries:**
|
||||
|
||||
1. Use the **InfluxDB 1.x `influx` interactive shell** to run `SHOW CONTINUOUS QUERIES`:
|
||||
1. Use the InfluxDB 1.x `influx` interactive shell to run `show continuous queries`:
|
||||
|
||||
{{< keep-url >}}
|
||||
```sh
|
||||
$ influx
|
||||
Connected to http://localhost:8086 version 1.8.5
|
||||
InfluxDB shell version: 1.8.5
|
||||
> SHOW CONTINUOUS QUERIES
|
||||
> show continuous queries
|
||||
```
|
||||
|
||||
2. Copy and save the displayed continuous queries.
|
||||
|
|
|
@ -37,11 +37,6 @@ Click the timezone dropdown to select a timezone to use for the dashboard. Selec
|
|||
|
||||
Click the refresh button (**{{< icon "refresh" >}}**) to manually refresh the dashboard's data.
|
||||
|
||||
#### Refresh a single dashboard cell
|
||||
|
||||
1. Click the **{{< icon "gear" >}}** on the dashboard cell you want to refresh.
|
||||
2. Click **{{< icon "refresh" >}} Refresh**.
|
||||
|
||||
### Select time range
|
||||
|
||||
1. Select from the time range options in the dropdown menu.
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue