Merge branch 'master' of github.com:influxdata/docs-v2
commit
508f2e397c
|
@ -1640,6 +1640,68 @@ paths:
|
|||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/delete:
|
||||
post:
|
||||
summary: Delete time series data from InfluxDB
|
||||
requestBody:
|
||||
description: Predicate delete request
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/DeletePredicateRequest"
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/TraceSpan'
|
||||
- in: query
|
||||
name: org
|
||||
description: Specifies the organization that owns the bucket.
|
||||
schema:
|
||||
type: string
|
||||
description: All points within batch are written to this organization.
|
||||
- in: query
|
||||
name: bucket
|
||||
description: Specifies the bucket to delete data from.
|
||||
schema:
|
||||
type: string
|
||||
description: Points are only deleted from this bucket.
|
||||
- in: query
|
||||
name: orgID
|
||||
description: Specifies the organization ID that owns the bucket.
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: bucketID
|
||||
description: Specifies the bucket ID to delete data from.
|
||||
schema:
|
||||
type: string
|
||||
description: Points are only deleted from this bucket.
|
||||
responses:
|
||||
'204':
|
||||
description: delete has been accepted
|
||||
'400':
|
||||
description: invalid request.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
'404':
|
||||
description: the bucket or organization is not found.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
'403':
|
||||
description: no token was sent or does not have sufficient permissions.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
default:
|
||||
description: internal server error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/ready:
|
||||
servers:
|
||||
- url: /
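For reference, a minimal sketch of a call to the new delete endpoint, assuming the API is served under `/api/v2` on the default port; the organization, bucket, token, time range, and predicate values are placeholders:

```sh
# Delete points in example-bucket that match the predicate
# within the given start/stop time range (all values are placeholders)
curl --request POST "http://localhost:9999/api/v2/delete?org=example-org&bucket=example-bucket" \
  --header "Authorization: Token YOUR_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "start": "2019-11-10T00:00:00Z",
    "stop": "2019-11-11T00:00:00Z",
    "predicate": "tag1=\"value1\" and tag2=\"value2\""
  }'
```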
|
||||
|
@ -3212,7 +3274,7 @@ paths:
|
|||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Bucket"
|
||||
$ref: "#/components/schemas/PostBucketRequest"
|
||||
responses:
|
||||
'201':
|
||||
description: Bucket created
|
||||
|
@ -3975,6 +4037,12 @@ paths:
|
|||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ResourceMembers"
|
||||
'404':
|
||||
description: Organization not found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
default:
|
||||
description: Unexpected error
|
||||
content:
|
||||
|
@ -4067,6 +4135,12 @@ paths:
|
|||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ResourceOwners"
|
||||
'404':
|
||||
description: Organization not found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
default:
|
||||
description: Unexpected error
|
||||
content:
|
||||
|
@ -5126,7 +5200,7 @@ paths:
|
|||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Check"
|
||||
$ref: "#/components/schemas/PostCheck"
|
||||
responses:
|
||||
'201':
|
||||
description: Check created
|
||||
|
@ -5420,7 +5494,7 @@ paths:
|
|||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/NotificationRule"
|
||||
$ref: "#/components/schemas/PostNotificationRule"
|
||||
responses:
|
||||
'201':
|
||||
description: Notification rule created
|
||||
|
@ -5780,7 +5854,7 @@ paths:
|
|||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/NotificationEndpoint"
|
||||
$ref: "#/components/schemas/PostNotificationEndpoint"
|
||||
responses:
|
||||
'201':
|
||||
description: Notification endpoint created
|
||||
|
@ -6160,6 +6234,21 @@ components:
|
|||
$ref: "#/components/schemas/Identifier"
|
||||
path:
|
||||
$ref: "#/components/schemas/StringLiteral"
|
||||
DeletePredicateRequest:
|
||||
description: The delete predicate request
|
||||
type: object
|
||||
required: [start, stop]
|
||||
properties:
|
||||
start:
|
||||
description: RFC3339Nano
|
||||
type: string
|
||||
stop:
|
||||
description: RFC3339Nano
|
||||
type: string
|
||||
predicate:
|
||||
description: SQL-like predicate expression
|
||||
example: tag1="value1" and tag2="value2"
|
||||
type: string
|
||||
Node:
|
||||
oneOf:
|
||||
- $ref: "#/components/schemas/Expression"
|
||||
|
@ -6674,6 +6763,34 @@ components:
|
|||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/Authorization"
|
||||
PostBucketRequest:
|
||||
properties:
|
||||
orgID:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
description:
|
||||
type: string
|
||||
rp:
|
||||
type: string
|
||||
retentionRules:
|
||||
type: array
|
||||
description: Rules to expire or retain data. No rules means data never expires.
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
default: expire
|
||||
enum:
|
||||
- expire
|
||||
everySeconds:
|
||||
type: integer
|
||||
description: Duration in seconds for how long data will be kept in the database.
|
||||
example: 86400
|
||||
minimum: 1
|
||||
required: [type, everySeconds]
|
||||
required: [name, retentionRules]
|
||||
Bucket:
|
||||
properties:
|
||||
links:
|
||||
|
@ -7038,6 +7155,16 @@ components:
|
|||
type: string
|
||||
format: date-time
|
||||
readOnly: true
|
||||
lastRunStatus:
|
||||
readOnly: true
|
||||
type: string
|
||||
enum:
|
||||
- failed
|
||||
- success
|
||||
- canceled
|
||||
lastRunError:
|
||||
readOnly: true
|
||||
type: string
|
||||
createdAt:
|
||||
type: string
|
||||
format: date-time
|
||||
|
@ -9261,9 +9388,6 @@ components:
|
|||
TaskCreateRequest:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
description: The type of task, this can be used for filtering tasks on list actions.
|
||||
type: string
|
||||
orgID:
|
||||
description: The ID of the organization that owns this Task.
|
||||
type: string
|
||||
|
@ -9319,7 +9443,7 @@ components:
|
|||
enum:
|
||||
- active
|
||||
- inactive
|
||||
Check:
|
||||
CheckDiscriminator:
|
||||
oneOf:
|
||||
- $ref: "#/components/schemas/DeadmanCheck"
|
||||
- $ref: "#/components/schemas/ThresholdCheck"
|
||||
|
@ -9328,6 +9452,28 @@ components:
|
|||
mapping:
|
||||
deadman: "#/components/schemas/DeadmanCheck"
|
||||
threshold: "#/components/schemas/ThresholdCheck"
|
||||
Check:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/CheckDiscriminator"
|
||||
- type: object
|
||||
properties:
|
||||
labels:
|
||||
$ref: "#/components/schemas/Labels"
|
||||
PostCheck:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/CheckDiscriminator"
|
||||
- type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum:
|
||||
- active
|
||||
- inactive
|
||||
labels:
|
||||
type: array
|
||||
description: List of label IDs to associate with check
|
||||
items:
|
||||
type: string
|
||||
Checks:
|
||||
properties:
|
||||
checks:
|
||||
|
@ -9384,8 +9530,6 @@ components:
|
|||
statusMessageTemplate:
|
||||
description: The template used to generate and write a status message.
|
||||
type: string
|
||||
labels:
|
||||
$ref: "#/components/schemas/Labels"
|
||||
links:
|
||||
type: object
|
||||
readOnly: true
|
||||
|
@ -9518,7 +9662,7 @@ components:
|
|||
enum:
|
||||
- active
|
||||
- inactive
|
||||
NotificationRule:
|
||||
NotificationRuleDiscriminator:
|
||||
oneOf:
|
||||
- $ref: "#/components/schemas/SlackNotificationRule"
|
||||
- $ref: "#/components/schemas/SMTPNotificationRule"
|
||||
|
@ -9531,6 +9675,28 @@ components:
|
|||
smtp: "#/components/schemas/SMTPNotificationRule"
|
||||
pagerduty: "#/components/schemas/PagerDutyNotificationRule"
|
||||
http: "#/components/schemas/HTTPNotificationRule"
|
||||
NotificationRule:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/NotificationRuleDiscriminator"
|
||||
- type: object
|
||||
properties:
|
||||
labels:
|
||||
$ref: "#/components/schemas/Labels"
|
||||
PostNotificationRule:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/NotificationRuleDiscriminator"
|
||||
- type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum:
|
||||
- active
|
||||
- inactive
|
||||
labels:
|
||||
type: array
|
||||
description: List of label IDs to associate with notification rule
|
||||
items:
|
||||
type: string
|
||||
NotificationRules:
|
||||
properties:
|
||||
notificationRules:
|
||||
|
@ -9605,8 +9771,6 @@ components:
|
|||
minItems: 1
|
||||
items:
|
||||
$ref: "#/components/schemas/StatusRule"
|
||||
labels:
|
||||
$ref: "#/components/schemas/Labels"
|
||||
links:
|
||||
type: object
|
||||
readOnly: true
|
||||
|
@ -9719,7 +9883,7 @@ components:
|
|||
enum:
|
||||
- active
|
||||
- inactive
|
||||
NotificationEndpoint:
|
||||
NotificationEndpointDiscrimator:
|
||||
oneOf:
|
||||
- $ref: "#/components/schemas/SlackNotificationEndpoint"
|
||||
- $ref: "#/components/schemas/PagerDutyNotificationEndpoint"
|
||||
|
@ -9730,6 +9894,28 @@ components:
|
|||
slack: "#/components/schemas/SlackNotificationEndpoint"
|
||||
pagerduty: "#/components/schemas/PagerDutyNotificationEndpoint"
|
||||
http: "#/components/schemas/HTTPNotificationEndpoint"
|
||||
NotificationEndpoint:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/NotificationEndpointDiscrimator"
|
||||
- type: object
|
||||
properties:
|
||||
labels:
|
||||
$ref: "#/components/schemas/Labels"
|
||||
PostNotificationEndpoint:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/NotificationEndpointDiscrimator"
|
||||
- type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum:
|
||||
- active
|
||||
- inactive
|
||||
labels:
|
||||
type: array
|
||||
description: List of label IDs to associate with check
|
||||
items:
|
||||
type: string
|
||||
NotificationEndpoints:
|
||||
properties:
|
||||
notificationEndpoints:
|
||||
|
@ -9766,8 +9952,6 @@ components:
|
|||
default: active
|
||||
type: string
|
||||
enum: ["active", "inactive"]
|
||||
labels:
|
||||
$ref: "#/components/schemas/Labels"
|
||||
links:
|
||||
type: object
|
||||
readOnly: true
|
||||
|
@ -9808,7 +9992,7 @@ components:
|
|||
allOf:
|
||||
- $ref: "#/components/schemas/NotificationEndpointBase"
|
||||
- type: object
|
||||
required: [clientURL, routingKey]
|
||||
required: [routingKey]
|
||||
properties:
|
||||
clientURL:
|
||||
type: string
|
||||
|
|
|
@ -27,24 +27,25 @@ This article describes how to get started with InfluxDB OSS. To get started with
|
|||
### Download and install InfluxDB v2.0 alpha
|
||||
Download InfluxDB v2.0 alpha for macOS.
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb_2.0.0-alpha.18_darwin_amd64.tar.gz" download>InfluxDB v2.0 alpha (macOS)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb_2.0.0-alpha.19_darwin_amd64.tar.gz" download>InfluxDB v2.0 alpha (macOS)</a>
|
||||
|
||||
### Unpackage the InfluxDB binaries
|
||||
Unpackage the downloaded archive.
|
||||
|
||||
_**Note:** The following commands are examples. Adjust the file paths to your own needs._
|
||||
To unpackage the downloaded archive, **double-click the archive file in Finder**
|
||||
or run the following command in a macOS command prompt application such as
|
||||
**Terminal** or **[iTerm2](https://www.iterm2.com/)**:
|
||||
|
||||
```sh
|
||||
# Unpackage contents to the current working directory
|
||||
gunzip -c ~/Downloads/influxdb_2.0.0-alpha.18_darwin_amd64.tar.gz | tar xopf -
|
||||
tar zxvf ~/Downloads/influxdb_2.0.0-alpha.19_darwin_amd64.tar.gz
|
||||
```
|
||||
|
||||
If you choose, you can place `influx` and `influxd` in your `$PATH`.
|
||||
You can also prefix the executables with `./` to run them in place.
|
||||
#### (Optional) Place the binaries in your $PATH
|
||||
If you choose, you can place `influx` and `influxd` in your `$PATH` or you can
|
||||
prefix the executables with `./` to run them in place.
|
||||
|
||||
```sh
|
||||
# (Optional) Copy the influx and influxd binary to your $PATH
|
||||
sudo cp influxdb_2.0.0-alpha.18_darwin_amd64/{influx,influxd} /usr/local/bin/
|
||||
sudo cp influxdb_2.0.0-alpha.19_darwin_amd64/{influx,influxd} /usr/local/bin/
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
|
@ -90,8 +91,8 @@ influxd --reporting-disabled
|
|||
### Download and install InfluxDB v2.0 alpha
|
||||
Download the InfluxDB v2.0 alpha package appropriate for your chipset.
|
||||
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb_2.0.0-alpha.18_linux_amd64.tar.gz" download >InfluxDB v2.0 alpha (amd64)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb_2.0.0-alpha.18_linux_arm64.tar.gz" download >InfluxDB v2.0 alpha (arm)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb_2.0.0-alpha.19_linux_amd64.tar.gz" download >InfluxDB v2.0 alpha (amd64)</a>
|
||||
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb_2.0.0-alpha.19_linux_arm64.tar.gz" download >InfluxDB v2.0 alpha (arm)</a>
|
||||
|
||||
### Place the executables in your $PATH
|
||||
Unpackage the downloaded archive and place the `influx` and `influxd` executables in your system `$PATH`.
|
||||
|
@ -100,10 +101,10 @@ _**Note:** The following commands are examples. Adjust the file names, paths, an
|
|||
|
||||
```sh
|
||||
# Unpackage contents to the current working directory
|
||||
tar xvzf path/to/influxdb_2.0.0-alpha.18_linux_amd64.tar.gz
|
||||
tar xvzf path/to/influxdb_2.0.0-alpha.19_linux_amd64.tar.gz
|
||||
|
||||
# Copy the influx and influxd binary to your $PATH
|
||||
sudo cp influxdb_2.0.0-alpha.18_linux_amd64/{influx,influxd} /usr/local/bin/
|
||||
sudo cp influxdb_2.0.0-alpha.19_linux_amd64/{influx,influxd} /usr/local/bin/
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
|
|
|
@ -15,19 +15,6 @@ related:
|
|||
Create a check in the InfluxDB user interface (UI).
|
||||
Checks query data and apply a status to each point based on specified conditions.
|
||||
|
||||
## Check types
|
||||
There are two types of checks – a threshold check and a deadman check.
|
||||
|
||||
#### Threshold check
|
||||
A threshold check assigns a status based on a value being above, below,
|
||||
inside, or outside of defined thresholds.
|
||||
[Create a threshold check](#create-a-threshold-check).
|
||||
|
||||
#### Deadman check
|
||||
A deadman check assigns a status to data when a series or group doesn't report
|
||||
in a specified amount of time.
|
||||
[Create a deadman check](#create-a-deadman-check).
|
||||
|
||||
## Parts of a check
|
||||
A check consists of two parts – a query and a check configuration.
|
||||
|
||||
|
@ -44,6 +31,17 @@ A check consists of two parts – a query and check configuration.
|
|||
- `ok`
|
||||
- Stores status in the `_level` column.
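To illustrate the query portion, here is a minimal sketch of the kind of Flux query a check might run; the bucket, measurement, and field names are placeholders, and the check configuration then assigns `crit`, `warn`, `info`, or `ok` to the aggregated values in the `_level` column:

```js
// Query a recent window of data and aggregate it before thresholds are applied
from(bucket: "example-bucket")
  |> range(start: -5m)
  |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
  |> aggregateWindow(every: 1m, fn: mean)
```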
|
||||
|
||||
## Check types
|
||||
There are two types of checks – a threshold check and a deadman check.
|
||||
|
||||
#### Threshold check
|
||||
A threshold check assigns a status based on a value being above, below,
|
||||
inside, or outside of defined thresholds.
|
||||
|
||||
#### Deadman check
|
||||
A deadman check assigns a status to data when a series or group doesn't report
|
||||
in a specified amount of time.
|
||||
|
||||
## Create a check in the InfluxDB UI
|
||||
1. Click **Monitoring & Alerting** in the sidebar of the InfluxDB UI.
|
||||
|
||||
|
|
|
@ -35,12 +35,6 @@ but examples include mean, median, top, bottom, etc.
|
|||
View [Flux's aggregate functions](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/)
|
||||
for more information and ideas.
|
||||
|
||||
## Create a destination bucket
|
||||
By design, tasks cannot write to the same bucket from which they are reading.
|
||||
You need another bucket where the task can store the aggregated, downsampled data.
|
||||
|
||||
_For information about creating buckets, see [Create a bucket](#)._
|
||||
|
||||
## Example downsampling task script
|
||||
The example task script below performs a very basic form of data downsampling and does the following:
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@ menu:
|
|||
weight: 101
|
||||
---
|
||||
|
||||
InfluxDB tasks are scheduled Flux scripts that take a stream of input data, modify or analyze
|
||||
it in some way, then store the modified data in a new bucket or perform other actions.
|
||||
An **InfluxDB task** is a scheduled Flux script that takes a stream of input data, modifies or analyzes
|
||||
it in some way, then stores the modified data in a new bucket or performs other actions.
|
||||
|
||||
This article walks through writing a basic InfluxDB task that downsamples
|
||||
data and stores it in a new bucket.
|
||||
|
@ -49,8 +49,7 @@ _See [Task configuration options](/v2.0/process-data/task-options) for detailed
|
|||
about each option._
|
||||
|
||||
{{% note %}}
|
||||
If creating a task in the InfluxDB user interface (UI), task options are defined
|
||||
in form fields when creating the task.
|
||||
When creating a task in the InfluxDB user interface (UI), task options are defined in form fields.
|
||||
{{% /note %}}
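As a quick reference, a minimal sketch of task options defined in Flux; the task name and interval are placeholders:

```js
// Placeholder task options: run every hour with no offset
option task = {
  name: "example-downsample-task",
  every: 1h,
  offset: 0m
}
```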
|
||||
|
||||
## Define a data source
|
||||
|
@ -102,7 +101,7 @@ _See [Common tasks](/v2.0/process-data/common-tasks) for examples of tasks commo
|
|||
|
||||
## Define a destination
|
||||
In the vast majority of task use cases, once data is transformed, it needs to be sent and stored somewhere.
|
||||
This could be a separate bucket with a different retention policy, another measurement, or even an alert endpoint _(Coming)_.
|
||||
This could be a separate bucket or another measurement.
|
||||
|
||||
The example below uses Flux's [`to()` function](/v2.0/reference/flux/stdlib/built-in/outputs/to)
|
||||
to send the transformed data to another bucket:
|
||||
|
@ -113,15 +112,11 @@ to send the transformed data to another bucket:
|
|||
```
|
||||
|
||||
{{% note %}}
|
||||
#### Important notes
|
||||
- You cannot write to the same bucket you are reading from.
|
||||
- In order to write data into InfluxDB, you must have `_time`, `_measurement`,
|
||||
`_field`, and `_value` columns.
|
||||
In order to write data into InfluxDB, you must have `_time`, `_measurement`, `_field`, and `_value` columns.
|
||||
{{% /note %}}
|
||||
|
||||
## Full example task script
|
||||
Below is the full example task script that combines all of the components described above:
|
||||
|
||||
Below is a task script that combines all of the components described above:
|
||||
|
||||
```js
|
||||
// Task options
|
||||
|
|
|
@ -19,7 +19,7 @@ To view your tasks, click the **Tasks** icon in the left navigation menu.
|
|||
|
||||
Click on the name of a task to update it.
|
||||
|
||||
#### Update a task's Flux script
|
||||
#### Update a task Flux script
|
||||
1. In the list of tasks, click the **Name** of the task you want to update.
|
||||
2. In the left panel, modify the task options.
|
||||
3. In the right panel, modify the task script.
|
||||
|
@ -30,12 +30,17 @@ Click on the name of a task to update it.
|
|||
In the list of tasks, click the {{< icon "toggle" >}} toggle to the left of the
|
||||
task you want to activate or inactivate.
|
||||
|
||||
#### Update a task description
|
||||
1. In the list of tasks, hover over the name of the task you want to update.
|
||||
2. Click the pencil icon {{< icon "pencil" >}}.
|
||||
3. Click outside of the field or press `RETURN` to update.
|
||||
|
||||
## Update a task with the influx CLI
|
||||
Use the `influx task update` command to update or change the status of an existing task.
|
||||
|
||||
_This command requires a task ID, which is available in the output of `influx task find`._
|
||||
|
||||
#### Update a task's Flux script
|
||||
#### Update a task Flux script
|
||||
Pass the file path of your updated Flux script to the `influx task update` command
|
||||
with the ID of the task you want to update.
|
||||
Modified [task options](/v2.0/process-data/task-options) defined in the Flux
|
||||
|
|
|
@ -12,8 +12,8 @@ weight: 105
|
|||
v2.0/tags: [tasks, flux]
|
||||
---
|
||||
|
||||
Task options define specific information about the task and are specified in your
|
||||
Flux script or in the InfluxDB user interface (UI).
|
||||
Task options define specific information about a task.
|
||||
They are set in a Flux script or in the InfluxDB user interface (UI).
|
||||
The following task options are available:
|
||||
|
||||
- [name](#name)
|
||||
|
@ -44,8 +44,6 @@ The interval at which the task runs.
|
|||
|
||||
_**Data type:** Duration_
|
||||
|
||||
_**Note:** In the InfluxDB UI, the **Interval** field sets this option_.
|
||||
|
||||
```js
|
||||
option task = {
|
||||
// ...
|
||||
|
@ -53,6 +51,10 @@ options task = {
|
|||
}
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
In the InfluxDB UI, the **Interval** field sets this option.
|
||||
{{% /note %}}
|
||||
|
||||
## cron
|
||||
The [cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that
|
||||
defines the schedule on which the task runs.
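A minimal sketch of a cron-scheduled task option (the task name and schedule are placeholders); a task uses either `every` or `cron`, not both:

```js
// Run the task at the top of every hour
option task = {
  name: "example-cron-task",
  cron: "0 * * * *"
}
```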
|
||||
|
|
|
@ -64,7 +64,7 @@ In your request, set the following:
|
|||
- `Content-type` header to `application/vnd.flux`.
|
||||
- Your plain text query as the request's raw data.
|
||||
|
||||
InfluxDB returns the query results in [annotated CSV](/v2.0/reference/annotated-csv/).
|
||||
InfluxDB returns the query results in [annotated CSV](/v2.0/reference/syntax/annotated-csv/).
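For example, a sketch of a raw Flux query sent with `curl`; the host, organization, bucket, and API token are placeholders:

```sh
# Send a plain text Flux query and return annotated CSV
curl --request POST "http://localhost:9999/api/v2/query?org=example-org" \
  --header "Authorization: Token YOUR_API_TOKEN" \
  --header "Accept: application/csv" \
  --header "Content-type: application/vnd.flux" \
  --data 'from(bucket: "example-bucket") |> range(start: -1h)'
```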
|
||||
|
||||
{{% note %}}
|
||||
#### Use gzip to compress the query response
|
||||
|
|
|
@ -14,6 +14,11 @@ weight: 209
|
|||
|
||||
Flux provides `if`, `then`, and `else` conditional expressions that allow for powerful and flexible Flux queries.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
##### Conditional expression syntax
|
||||
```js
|
||||
// Pattern
|
||||
|
|
|
@ -25,6 +25,11 @@ exists p.height
|
|||
// Returns false
|
||||
```
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
Use `exists` with row functions (
|
||||
[`filter()`](/v2.0/reference/flux/stdlib/built-in/transformations/filter/),
|
||||
[`map()`](/v2.0/reference/flux/stdlib/built-in/transformations/map/),
|
||||
|
|
|
@ -15,6 +15,11 @@ With Flux, you can group data by any column in your queried data set.
|
|||
"Grouping" partitions data into tables in which each row shares a common value for specified columns.
|
||||
This guide walks through grouping data in Flux and provides examples of how data is shaped in the process.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
## Group keys
|
||||
Every table has a **group key** – a list of columns for which every row in the table has the same value.
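For example, the following sketch regroups data by specific columns; the bucket, measurement, and grouping columns are placeholders:

```js
// Regroup the results so each output table shares the same host and field
from(bucket: "example-bucket")
  |> range(start: -1h)
  |> filter(fn: (r) => r._measurement == "cpu")
  |> group(columns: ["host", "_field"])
```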
|
||||
|
||||
|
|
|
@ -9,11 +9,16 @@ menu:
|
|||
weight: 208
|
||||
---
|
||||
|
||||
|
||||
Histograms provide valuable insight into the distribution of your data.
|
||||
This guide walks through using Flux's `histogram()` function to transform your data into a **cumulative histogram**.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
## histogram() function
|
||||
|
||||
The [`histogram()` function](/v2.0/reference/flux/stdlib/built-in/transformations/histogram) approximates the
|
||||
cumulative distribution of a dataset by counting data frequencies for a list of "bins."
|
||||
A **bin** is simply a range in which a data point falls.
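For example, a minimal sketch that counts values into a few explicit bins; the bucket, measurement, field, and bin boundaries are placeholders:

```js
// Count how many values fall at or below each bin's upper bound
from(bucket: "example-bucket")
  |> range(start: -1h)
  |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
  |> histogram(bins: [0.0, 25.0, 50.0, 75.0, 100.0])
```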
|
||||
|
|
|
@ -21,6 +21,11 @@ InfluxDB - memory usage and processes.
|
|||
In this guide, we'll join two data streams, one representing memory usage and the other representing the
|
||||
total number of running processes, then calculate the average memory usage per running process.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
## Define stream variables
|
||||
In order to perform a join, you must have two streams of data.
|
||||
Assign a variable to each data stream.
|
||||
|
|
|
@ -15,6 +15,11 @@ Use Flux to process and manipulate timestamps to suit your needs.
|
|||
- [Convert timestamp format](#convert-timestamp-format)
|
||||
- [Time-related Flux functions](#time-related-flux-functions)
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
## Convert timestamp format
|
||||
|
||||
### Convert nanosecond epoch timestamp to RFC3339
|
||||
|
@ -27,7 +32,7 @@ time(v: 1568808000000000000)
|
|||
```
|
||||
|
||||
### Convert RFC3339 to nanosecond epoch timestamp
|
||||
Use the [`uint()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/unit/)
|
||||
Use the [`uint()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/uint/)
|
||||
to convert an RFC3339 timestamp to a nanosecond epoch timestamp.
|
||||
|
||||
```js
|
||||
|
@ -59,7 +64,7 @@ to convert the duration to a string.
|
|||
|
||||
## Time-related Flux functions
|
||||
|
||||
### Retrieve the current time
|
||||
### Retrieve the current UTC time
|
||||
Use the [`now()` function](/v2.0/reference/flux/stdlib/built-in/misc/now/) to
|
||||
return the current UTC time in RFC3339 format.
|
||||
|
||||
|
@ -67,6 +72,26 @@ return the current UTC time in RFC3339 format.
|
|||
now()
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
`now()` is cached at runtime, so all instances of `now()` in a Flux script
|
||||
return the same value.
|
||||
{{% /note %}}
|
||||
|
||||
### Retrieve the current system time
|
||||
Import the `system` package and use the [`system.time()` function](/v2.0/reference/flux/stdlib/system/time/)
|
||||
to return the current system time of the host machine in RFC3339 format.
|
||||
|
||||
```js
|
||||
import "system"
|
||||
|
||||
system.time()
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
`system.time()` returns the time it is executed, so each instance of `system.time()`
|
||||
in a Flux script returns a unique value.
|
||||
{{% /note %}}
|
||||
|
||||
### Add a duration to a timestamp
|
||||
The [`experimental.addDuration()` function](/v2.0/reference/flux/stdlib/experimental/addduration/)
|
||||
adds a duration to a specified time and returns the resulting time.
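For example, a short sketch (the duration and timestamp are arbitrary):

```js
import "experimental"

// Returns the time six hours after the given timestamp
experimental.addDuration(d: 6h, to: 2019-09-16T12:00:00Z)
```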
|
||||
|
|
|
@ -15,6 +15,11 @@ supports mathematic expressions in data transformations.
|
|||
This article describes how to use [Flux arithmetic operators](/v2.0/reference/flux/language/operators/#arithmetic-operators)
|
||||
to "map" over data and transform values using mathematic operations.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
##### Basic mathematic operations
|
||||
```js
|
||||
// Examples executed using the Flux REPL
|
||||
|
|
|
@ -14,6 +14,11 @@ With Flux, regular expressions are primarily used for evaluation logic in predic
|
|||
such as filtering rows, dropping and keeping columns, state detection, etc.
|
||||
This guide shows how to use regular expressions in your Flux scripts.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
## Go regular expression syntax
|
||||
Flux uses Go's [regexp package](https://golang.org/pkg/regexp/) for regular expression search.
|
||||
The links [below](#helpful-links) provide information about Go's regular expression syntax.
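For example, a minimal sketch that filters rows whose `cpu` tag matches a regular expression; the bucket, measurement, and tag values are placeholders:

```js
// Keep only rows whose cpu tag looks like cpu0, cpu1, ...
from(bucket: "example-bucket")
  |> range(start: -15m)
  |> filter(fn: (r) => r._measurement == "cpu" and r.cpu =~ /cpu[0-9]+/)
```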
|
||||
|
|
|
@ -14,6 +14,14 @@ weight: 206
|
|||
|
||||
The [`sort()` function](/v2.0/reference/flux/stdlib/built-in/transformations/sort)
|
||||
orders the records within each table.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
##### Example sorting system uptime
|
||||
|
||||
The following example orders system uptime first by region, then host, then value.
|
||||
|
||||
```js
|
||||
|
|
|
@ -21,6 +21,11 @@ and use the results with InfluxDB dashboards, tasks, and other operations.
|
|||
- [Use SQL results to populate dashboard variables](#use-sql-results-to-populate-dashboard-variables)
|
||||
- [Sample sensor data](#sample-sensor-data)
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
## Query a SQL data source
|
||||
To query a SQL data source:
|
||||
|
||||
|
|
|
@ -17,6 +17,11 @@ or "windowing" data, then aggregating windowed values into a new value.
|
|||
This guide walks through windowing and aggregating data with Flux and demonstrates
|
||||
how data is shaped in the process.
|
||||
|
||||
If you're just getting started with Flux queries, check out the following:
|
||||
|
||||
- [Get started with Flux](/v2.0/query-data/get-started/) for a conceptual overview of Flux and parts of a Flux query.
|
||||
- [Execute queries](/v2.0/query-data/execute-queries/) to discover a variety of ways to run your queries.
|
||||
|
||||
{{% note %}}
|
||||
The following example is an in-depth walk-through of the steps required to window and aggregate data.
|
||||
The [`aggregateWindow()` function](#summing-up) performs these operations for you, but understanding
|
||||
|
|
|
@ -47,6 +47,7 @@ retrieving authentication tokens._
|
|||
|:------- |:----------- |
|
||||
| [auth](/v2.0/reference/cli/influx/auth) | Authorization management commands |
|
||||
| [bucket](/v2.0/reference/cli/influx/bucket) | Bucket management commands |
|
||||
| [delete](/v2.0/reference/cli/influx/delete) | Delete points from InfluxDB |
|
||||
| [help](/v2.0/reference/cli/influx/help) | Help about any command |
|
||||
| [org](/v2.0/reference/cli/influx/org) | Organization management commands |
|
||||
| [ping](/v2.0/reference/cli/influx/ping) | Check the InfluxDB `/health` endpoint |
|
||||
|
@ -62,5 +63,5 @@ retrieving authentication tokens._
|
|||
|:---- |:----------- |:----------:|
|
||||
| `-h`, `--help` | Help for the influx command | |
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -38,5 +38,5 @@ influx auth [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx auth active [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -40,5 +40,5 @@ influx auth create [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx auth delete [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -29,5 +29,5 @@ influx auth find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx auth inactive [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -34,5 +34,5 @@ influx bucket [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -27,5 +27,5 @@ influx bucket create [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx bucket delete [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -28,5 +28,5 @@ influx bucket find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -27,5 +27,5 @@ influx bucket update [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
---
|
||||
title: influx delete – Delete data from InfluxDB
|
||||
description: The 'influx delete' command deletes points from an InfluxDB bucket.
|
||||
menu:
|
||||
v2_0_ref:
|
||||
name: influx delete
|
||||
parent: influx
|
||||
weight: 101
|
||||
v2.0/tags: [delete]
|
||||
---
|
||||
|
||||
The `influx delete` command deletes [points](/v2.0/reference/glossary/#point)
|
||||
from an InfluxDB bucket.
|
||||
|
||||
## Usage
|
||||
```
|
||||
influx delete [flags]
|
||||
```
|
||||
|
||||
{{% warn %}}
|
||||
Running `influx delete` without the `-p` or `--predicate` flag deletes all data with
|
||||
timestamps between the specified `--start` and `--stop` times in the specified bucket.
|
||||
{{% /warn %}}
|
||||
|
||||
## Flags
|
||||
| Flag | Description | Input type |
|
||||
|:---- |:----------- |:----------:|
|
||||
| `-b`, `--bucket` | The name of the bucket to remove data from | string |
|
||||
| `--bucket-id` | The ID of the bucket to remove data from | string |
|
||||
| `-h`, `--help` | Help for the `delete` command | |
|
||||
| `-o`, `--org` | The name of the organization that owns the bucket | string |
|
||||
| `--org-id` | The ID of the organization that owns the bucket | string |
|
||||
| `-p`, `--predicate` | InfluxQL-like predicate string (see [Delete predicate](/v2.0/reference/syntax/delete-predicate)) | string |
|
||||
| `--start` | The start time in RFC3339 format (e.g., `2009-01-02T23:00:00Z`) | string |
|
||||
| `--stop` | The stop time in RFC3339 format (e.g., `2009-01-02T23:00:00Z`) | string |
|
||||
|
||||
## Global flags
|
||||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
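For example, to delete points that match a predicate within a time range (the bucket, organization, time range, and predicate values are placeholders):

```sh
influx delete \
  --bucket example-bucket \
  --org example-org \
  --start 2019-11-10T00:00:00Z \
  --stop 2019-11-11T00:00:00Z \
  --predicate 'host="host1"'
```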
|
|
@ -24,5 +24,5 @@ influx help [command] [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -38,5 +38,5 @@ influx org [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx org create [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx org delete [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx org find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -33,5 +33,5 @@ influx org members [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -27,5 +27,5 @@ influx org members add [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx org members list [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -27,5 +27,5 @@ influx org members remove [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx org update [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -29,5 +29,5 @@ influx ping [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -29,5 +29,5 @@ influx query [query literal or @/path/to/query.flux] [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -36,5 +36,5 @@ To use the Flux REPL, you must first authenticate with a [token](/v2.0/security/
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -28,5 +28,5 @@ influx setup [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -37,5 +37,5 @@ influx task [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx task create [query literal or @/path/to/query.flux] [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx task delete [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -29,5 +29,5 @@ influx task find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -31,5 +31,5 @@ influx task log [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -27,5 +27,5 @@ influx task log find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx task retry [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -32,5 +32,5 @@ influx task run [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -30,5 +30,5 @@ influx task run find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx task update [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -34,5 +34,5 @@ influx user [command]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx user create [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -25,5 +25,5 @@ influx user delete [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -26,5 +26,5 @@ influx user find [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -27,5 +27,5 @@ influx user update [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -33,5 +33,5 @@ influx write [line protocol or @/path/to/points.txt] [flags]
|
|||
| Global flag | Description | Input type |
|
||||
|:----------- |:----------- |:----------:|
|
||||
| `--host` | HTTP address of InfluxDB (default `http://localhost:9999`) | string |
|
||||
| `--local` | Run commands locally against the filesystem | |
|
||||
| `-t`, `--token` | API token to be used throughout client calls | string |
|
||||
| `--local` | Run commands against the local filesystem | |
|
||||
| `-t`, `--token` | API token to use in client calls | string |
|
||||
|
|
|
@ -28,18 +28,30 @@ influxd [command]
|
|||
|
||||
## Flags
|
||||
|
||||
| Flag | Description | Input type |
|
||||
| :--------------------- | :------------------------------------------------------------------------------------- | :--------: |
|
||||
| `--assets-path` | Override default assets by serving from a specific directory (developer mode) | string |
|
||||
| `--bolt-path` | Path to boltdb database (default `~/.influxdbv2/influxd.bolt`) | string |
|
||||
| `--e2e-testing` | Add /debug/flush endpoint to clear stores; used for end-to-end tests (default `false`) | |
|
||||
| `--engine-path` | Path to persistent engine files (default `~/.influxdbv2/engine`) | string |
|
||||
| `-h`, `--help` | Help for `influxd` | |
|
||||
| `--http-bind-address` | Bind address for the REST HTTP API (default `:9999`) | string |
|
||||
| `--log-level` | Supported log levels are debug, info, and error (default `info`) | string |
|
||||
| `--reporting-disabled` | Disable sending telemetry data to **https:<nolink>//telemetry.influxdata.com** | |
|
||||
| `--secret-store` | Data store for secrets (bolt or vault) (default `bolt`) | string |
|
||||
| `--session-length` | TTL in minutes for newly created sessions (default `60`) | integer |
|
||||
| `--session-renew-disabled` | Disables automatically extending session TTL on request | |
|
||||
| `--store` | Data store for REST resources (bolt or memory) (default `bolt`) | string |
|
||||
| `--tracing-type` | Supported tracing types (log or jaeger) | string |
|
||||
| Flag | Description | Input type |
|
||||
| :--------------------- | :---------------------------------------------------------------------------------------------------- | :--------: |
|
||||
| `--assets-path` | Override default assets by serving from a specific directory (developer mode) | string |
|
||||
| `--bolt-path` | Path to boltdb database (default `~/.influxdbv2/influxd.bolt`) | string |
|
||||
| `--e2e-testing` | Add /debug/flush endpoint to clear stores; used for end-to-end tests (default `false`) | |
|
||||
| `--engine-path` | Path to persistent engine files (default `~/.influxdbv2/engine`) | string |
|
||||
| `-h`, `--help` | Help for `influxd` | |
|
||||
| `--http-bind-address` | Bind address for the REST HTTP API (default `:9999`) | string |
|
||||
| `--log-level` | Supported log levels are debug, info, and error (default `info`) | string |
|
||||
| `--reporting-disabled` | Disable sending telemetry data to **https:<nolink>//telemetry.influxdata.com** | |
|
||||
| `--secret-store` | Data store for secrets (bolt or vault) (default `bolt`) | string |
|
||||
| `--session-length` | TTL in minutes for newly created sessions (default `60`) | integer |
|
||||
| `--session-renew-disabled` | Disables automatically extending session TTL on request | |
|
||||
| `--store` | Data store for REST resources (bolt or memory) (default `bolt`) | string |
|
||||
| `--tls-cert` | Path to TLS certificate file | string |
|
||||
| `--tls-key` | Path to TLS private key file | string |
|
||||
| `--tracing-type` | Supported tracing types (log or jaeger) | string |
|
||||
| `--vault-addr ` | Address of the Vault server (example: `https://127.0.0.1:8200/`) | string |
|
||||
| `--vault-cacert` | Path to a PEM-encoded CA certificate file | string |
|
||||
| `--vault-capath` | Path to a directory of PEM-encoded CA certificate files | string |
|
||||
| `--vault-client-cert` | Path to a PEM-encoded client certificate | string |
|
||||
| `--vault-client-key` | Path to an unencrypted, PEM-encoded private key which corresponds to the matching client certificate | string |
|
||||
| `--vault-max-retries` | Maximum number of retries when encountering a 5xx error code (default `2`) | integer |
|
||||
| `--vault-client-timeout` | Vault client timeout (default `60s`) | duration |
|
||||
| `--vault-skip-verify` | Skip certificate verification when communicating with Vault | |
|
||||
| `--vault-tls-server-name` | Name to use as the SNI host when connecting to Vault via TLS | string |
|
||||
| `--vault-token` | Vault authentication token | string |
|
||||
|
|
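For example, the new TLS flags let `influxd` serve the API over HTTPS; a minimal sketch, assuming a certificate and key already exist at the paths shown:

```sh
# Start influxd with TLS enabled (paths are placeholders).
influxd \
  --tls-cert "/etc/ssl/influxdb.crt" \
  --tls-key "/etc/ssl/influxdb.key"
```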
|
@ -28,18 +28,30 @@ influxd run
|
|||
|
||||
## Flags
|
||||
|
||||
| Flag | Description | Input type |
|
||||
| :--------------------- | :------------------------------------------------------------------------------------- | :--------: |
|
||||
| `--assets-path` | Override default assets by serving from a specific directory (developer mode) | string |
|
||||
| `--bolt-path` | Path to boltdb database (default `~/.influxdbv2/influxd.bolt`) | string |
|
||||
| `--e2e-testing` | Add /debug/flush endpoint to clear stores; used for end-to-end tests (default `false`) | |
|
||||
| `--engine-path` | Path to persistent engine files (default `~/.influxdbv2/engine`) | string |
|
||||
| `-h`, `--help` | Help for `influxd` | |
|
||||
| `--http-bind-address` | Bind address for the REST HTTP API (default `:9999`) | string |
|
||||
| `--log-level` | Supported log levels are debug, info, and error (default `info`) | string |
|
||||
| `--reporting-disabled` | Disable sending telemetry data to **https:<nolink>//telemetry.influxdata.com** | |
|
||||
| `--secret-store` | Data store for secrets (bolt or vault) (default `bolt`) | string |
|
||||
| `--session-length` | TTL in minutes for newly created sessions (default `60`) | integer |
|
||||
| `--session-renew-disabled` | Disables automatically extending session TTL on request | |
|
||||
| `--store` | Data store for REST resources (bolt or memory) (default `bolt`) | string |
|
||||
| `--tracing-type` | Supported tracing types (log or jaeger) | string |
|
||||
| Flag | Description | Input type |
|
||||
| :--------------------- | :---------------------------------------------------------------------------------------------------- | :--------: |
|
||||
| `--assets-path` | Override default assets by serving from a specific directory (developer mode) | string |
|
||||
| `--bolt-path` | Path to boltdb database (default `~/.influxdbv2/influxd.bolt`) | string |
|
||||
| `--e2e-testing` | Add /debug/flush endpoint to clear stores; used for end-to-end tests (default `false`) | |
|
||||
| `--engine-path` | Path to persistent engine files (default `~/.influxdbv2/engine`) | string |
|
||||
| `-h`, `--help` | Help for `influxd` | |
|
||||
| `--http-bind-address` | Bind address for the REST HTTP API (default `:9999`) | string |
|
||||
| `--log-level` | Supported log levels are debug, info, and error (default `info`) | string |
|
||||
| `--reporting-disabled` | Disable sending telemetry data to **https:<nolink>//telemetry.influxdata.com** | |
|
||||
| `--secret-store` | Data store for secrets (bolt or vault) (default `bolt`) | string |
|
||||
| `--session-length` | TTL in minutes for newly created sessions (default `60`) | integer |
|
||||
| `--session-renew-disabled` | Disables automatically extending session TTL on request | |
|
||||
| `--store` | Data store for REST resources (bolt or memory) (default `bolt`) | string |
|
||||
| `--tls-cert` | Path to TLS certificate file | string |
|
||||
| `--tls-key` | Path to TLS private key file | string |
|
||||
| `--tracing-type` | Supported tracing types (log or jaeger) | string |
|
||||
| `--vault-addr ` | Address of the Vault server (example: `https://127.0.0.1:8200/`) | string |
|
||||
| `--vault-cacert` | Path to a PEM-encoded CA certificate file | string |
|
||||
| `--vault-capath` | Path to a directory of PEM-encoded CA certificate files | string |
|
||||
| `--vault-client-cert` | Path to a PEM-encoded client certificate | string |
|
||||
| `--vault-client-key` | Path to an unencrypted, PEM-encoded private key which corresponds to the matching client certificate | string |
|
||||
| `--vault-max-retries` | Maximum number of retries when encountering a 5xx error code (default `2`) | integer |
|
||||
| `--vault-client-timeout` | Vault client timeout (default `60s`) | duration |
|
||||
| `--vault-skip-verify` | Skip certificate verification when communicating with Vault | |
|
||||
| `--vault-tls-server-name` | Name to use as the SNI host when connecting to Vault via TLS | string |
|
||||
| `--vault-token` | Vault authentication token | string |
|
||||
|
|
|
@ -24,22 +24,16 @@ To configure InfluxDB, use the following configuration options when starting the
|
|||
- [--session-renew-disabled](#session-renew-disabled)
|
||||
- [--store](#store)
|
||||
- [--tracing-type](#tracing-type)
|
||||
|
||||
```sh
|
||||
influxd \
|
||||
--assets-path=/path/to/custom/assets-dir \
|
||||
--bolt-path=~/.influxdbv2/influxd.bolt \
|
||||
--e2e-testing \
|
||||
--engine-path=~/.influxdbv2/engine \
|
||||
--http-bind-address=:9999 \
|
||||
--log-level=info \
|
||||
--reporting-disabled \
|
||||
--secret-store=bolt \
|
||||
--session-length=60 \
|
||||
--session-renew-disabled \
|
||||
--store=bolt \
|
||||
--tracing-type=log
|
||||
```
|
||||
- [--vault-addr](#vault-addr)
|
||||
- [--vault-cacert](#vault-cacert)
|
||||
- [--vault-capath](#vault-capath)
|
||||
- [--vault-client-cert](#vault-client-cert)
|
||||
- [--vault-client-key](#vault-client-key)
|
||||
- [--vault-max-retries](#vault-max-retries)
|
||||
- [--vault-client-timeout](#vault-client-timeout)
|
||||
- [--vault-skip-verify](#vault-skip-verify)
|
||||
- [--vault-tls-server-name](#vault-tls-server-name)
|
||||
- [--vault-token](#vault-token)
|
||||
|
||||
---
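Taken together, the Vault-related options listed above can be combined with `--secret-store` when starting `influxd`; a sketch with placeholder values:

```sh
# Store secrets in Vault instead of the default bolt store.
influxd \
  --secret-store=vault \
  --vault-addr=https://127.0.0.1:8200/ \
  --vault-token=exAmple-t0ken-958a-f490-c7fd0eda5e9e
```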
|
||||
|
||||
|
@ -189,3 +183,136 @@ Tracing is disabled by default.
|
|||
```sh
|
||||
influxd --tracing-type=log
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## --vault-addr
|
||||
Specifies the address of the Vault server expressed as a URL and port.
|
||||
For example: `https://127.0.0.1:8200/`.
|
||||
|
||||
```sh
|
||||
influxd --vault-addr=https://127.0.0.1:8200/
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_ADDR` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
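For example, to supply the address through the environment instead of the flag:

```sh
# Equivalent to --vault-addr; the flag wins if both are set.
export VAULT_ADDR=https://127.0.0.1:8200/
influxd --secret-store=vault
```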
|
||||
|
||||
---
|
||||
|
||||
## --vault-cacert
|
||||
Specifies the path to a PEM-encoded CA certificate file on the local disk.
|
||||
This file is used to verify the Vault server's SSL certificate.
|
||||
**This setting takes precedence over the [`--vault-capath`](#vault-capath) setting.**
|
||||
|
||||
```sh
|
||||
influxd --vault-cacert=/path/to/ca.pem
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_CACERT` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-capath
|
||||
Specifies the path to a directory of PEM-encoded CA certificate files on the local disk.
|
||||
These certificates are used to verify the Vault server's SSL certificate.
|
||||
|
||||
```sh
|
||||
influxd --vault-capath=/path/to/certs/
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_CAPATH` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-client-cert
|
||||
Specifies the path to a PEM-encoded client certificate on the local disk.
|
||||
This file is used for TLS communication with the Vault server.
|
||||
|
||||
```sh
|
||||
influxd --vault-client-cert=/path/to/client_cert.pem
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_CLIENT_CERT` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-client-key
|
||||
Specifies the path to an unencrypted, PEM-encoded private key on disk which
|
||||
corresponds to the matching client certificate.
|
||||
|
||||
```sh
|
||||
influxd --vault-client-key=/path/to/private_key.pem
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_CLIENT_KEY` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-max-retries
|
||||
Specifies the maximum number of retries when encountering a 5xx error code.
|
||||
The default is 2 (for three attempts in total). Set this to 0 or less to disable retrying.
|
||||
|
||||
**Default:** `2`
|
||||
|
||||
```sh
|
||||
influxd --vault-max-retries=2
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_MAX_RETRIES` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-client-timeout
|
||||
Specifies the Vault client timeout.
|
||||
|
||||
**Default:** `60s`
|
||||
|
||||
```sh
|
||||
influxd --vault-client-timeout=60s
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_CLIENT_TIMEOUT` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-skip-verify
|
||||
Skip certificate verification when communicating with Vault.
|
||||
_Setting this variable voids [Vault's security model](https://www.vaultproject.io/docs/internals/security.html)
|
||||
and is **not recommended**._
|
||||
|
||||
```sh
|
||||
influxd --vault-skip-verify
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_SKIP_VERIFY` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-tls-server-name
|
||||
Specifies the name to use as the Server Name Indication (SNI) host when connecting via TLS.
|
||||
|
||||
```sh
|
||||
influxd --vault-tls-server-name=secure.example.com
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_TLS_SERVER_NAME` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
||||
---
|
||||
|
||||
## --vault-token
|
||||
Specifies the Vault authentication token to use when authenticating with Vault.
Specifies the Vault authentication token to use when authenticating with Vault.
|
||||
|
||||
```sh
|
||||
influxd --vault-token=exAmple-t0ken-958a-f490-c7fd0eda5e9e
|
||||
```
|
||||
|
||||
_You can also set this using the `VAULT_TOKEN` environment variable, however
|
||||
`influxd` flags take precedence over environment variables._
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
title: Flux query language
|
||||
title: Flux data scripting language
|
||||
description: Reference articles for Flux functions and the Flux language specification.
|
||||
v2.0/tags: [flux]
|
||||
menu:
|
||||
v2_0_ref:
|
||||
name: Flux query language
|
||||
name: Flux language
|
||||
weight: 4
|
||||
---
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ description: >
|
|||
menu:
|
||||
v2_0_ref:
|
||||
name: Flux specification
|
||||
parent: Flux query language
|
||||
parent: Flux language
|
||||
weight: 103
|
||||
v2.0/tags: [flux]
|
||||
---
|
||||
|
|
|
@ -167,13 +167,24 @@ duration_unit = "y" | "mo" | "w" | "d" | "h" | "m" | "s" | "ms" | "us" | "µs" |
|
|||
| ns | nanoseconds (1 billionth of a second) |
|
||||
|
||||
Durations represent a length of time.
|
||||
Lengths of time are dependent on specific instants in time they occur and as such, durations do not represent a fixed amount of time.
|
||||
No amount of seconds is equal to a day, as days vary in their number of seconds.
|
||||
No amount of days is equal to a month, as months vary in their number of days.
|
||||
A duration consists of three basic time units: seconds, days and months.
|
||||
Lengths of time are dependent on specific instants in time they occur and as such,
|
||||
durations do not represent a fixed amount of time.
|
||||
No number of days is equal to a month, as months vary in their number of days.
|
||||
Durations are a tuple of positive integers that represent a duration and the sign
|
||||
of the duration (positive or negative).
|
||||
Durations are implemented this way so it is possible to determine whether a duration is positive or negative.
|
||||
Since duration values depend on their context, the only way to know if a duration
|
||||
is a positive or negative number is if all magnitudes have the same sign.
|
||||
In the canonical implementation, this is implemented as a tuple of the months and
|
||||
nanoseconds and a boolean that indicates whether it is positive or negative.
|
||||
The spec does not prescribe a specific implementation and other implementations
|
||||
may use a different internal representation.
|
||||
|
||||
Durations can be combined via addition and subtraction.
|
||||
Durations can be multiplied by an integer value.
|
||||
Durations cannot be combined by addition and subtraction.
|
||||
All magnitudes in the tuple must be a positive integer which cannot be guaranteed
|
||||
when using addition and subtraction.
|
||||
Durations can be multiplied by any integer value.
|
||||
The unary negative operator is the equivalent of multiplying the duration by -1.
|
||||
These operations are performed on each time unit independently.
|
||||
|
||||
##### Examples of duration literals
|
||||
|
@ -181,9 +192,11 @@ These operations are performed on each time unit independently.
|
|||
```js
|
||||
1s
|
||||
10d
|
||||
1h15m // 1 hour and 15 minutes
|
||||
1h15m // 1 hour and 15 minutes
|
||||
5w
|
||||
1mo5d // 1 month and 5 days
|
||||
1mo5d // 1 month and 5 days
|
||||
-1mo5d // negative 1 month and 5 days
|
||||
5w * 2 // 10 weeks
|
||||
```
|
||||
Durations can be added to date times to produce a new date time.
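For example (an illustrative sketch of the addition described above, following the calendar rules given earlier; these values are not taken from any particular Flux build):

```js
2019-01-01T00:00:00Z + 1mo5d  // 2019-02-06T00:00:00Z
2019-03-15T00:00:00Z + 2h30m  // 2019-03-15T02:30:00Z
```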
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ aliases:
|
|||
v2.0/tags: [flux, functions, package]
|
||||
menu:
|
||||
v2_0_ref:
|
||||
parent: Flux query language
|
||||
parent: Flux language
|
||||
weight: 102
|
||||
---
|
||||
|
||||
|
|
|
@ -162,7 +162,12 @@ The operation:
|
|||
|
||||
```js
|
||||
// ...
|
||||
|> to(bucket:"my-bucket", org:"my-org", tagColumns:["tag1"], fieldFn: (r) => return {"hum": r.hum, "temp": r.temp})
|
||||
|> to(
|
||||
bucket:"my-bucket",
|
||||
org:"my-org",
|
||||
tagColumns:["tag1"],
|
||||
fieldFn: (r) => ({"hum": r.hum, "temp": r.temp})
|
||||
)
|
||||
```
|
||||
|
||||
is equivalent to writing the above data using the following line protocol:
|
||||
|
|
|
@ -0,0 +1,46 @@
---
title: prometheus.histogramQuantile() function
description: >
  The `prometheus.histogramQuantile()` function calculates quantiles on a set of values
  assuming the given histogram data is scraped or read from a Prometheus data source.
menu:
  v2_0_ref:
    name: prometheus.histogramQuantile
    parent: Prometheus
weight: 301
---

The `prometheus.histogramQuantile()` function calculates quantiles on a set of values
assuming the given histogram data is scraped or read from a Prometheus data source.

_**Function type:** Aggregate_

{{% warn %}}
The `prometheus.histogramQuantile()` function is currently experimental and subject to change at any time.
By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
{{% /warn %}}

```js
import "experimental/prometheus"

prometheus.histogramQuantile(
  quantile: 0.99
)
```

## Parameters

### quantile
A value between 0.0 and 1.0 indicating the desired quantile.

_**Data type:** Float_

## Examples

### Calculate the 99th quantile in Prometheus data
```js
import "experimental/prometheus"

prometheus.scrape(url: "https://example-url.com/metrics")
  |> prometheus.histogramQuantile(quantile: 0.99)
```
|
@ -58,7 +58,7 @@ the **field value** as the **column value**.
|
|||
| ----- | ------------ | --------- |
|
||||
| timestamp | measurement-name | field value |
|
||||
|
||||
If using the built-in `from()` function, use [`pivot()`](/v2.0/reference/flux/stdlib/transformations/pivot/)
|
||||
If using the built-in `from()` function, use [`pivot()`](/v2.0/reference/flux/stdlib/built-in/transformations/pivot/)
|
||||
to transform data into the structure `experimental.to()` expects.
|
||||
_[See the example below](#use-pivot-to-shape-data-for-experimental-to)._
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: monitor.check() function
|
||||
description: >
|
||||
The `monitor.check()` function function checks input data and assigns a level
|
||||
The `monitor.check()` function checks input data and assigns a level
|
||||
(`ok`, `info`, `warn`, or `crit`) to each row based on predicate functions.
|
||||
aliases:
|
||||
- /v2.0/reference/flux/functions/monitor/check/
|
||||
|
@ -12,7 +12,7 @@ menu:
|
|||
weight: 202
|
||||
---
|
||||
|
||||
The `monitor.check()` function function checks input data and assigns a level
|
||||
The `monitor.check()` function checks input data and assigns a level
|
||||
(`ok`, `info`, `warn`, or `crit`) to each row based on predicate functions.
|
||||
|
||||
_**Function type:** Transformation_
|
||||
|
|
|
@ -50,7 +50,7 @@ Submitting a batch of points using a single HTTP request to the write endpoints
|
|||
InfluxData typically recommends batch sizes of 5,000-10,000 points.
|
||||
In some use cases, performance may improve with significantly smaller or larger batches.
|
||||
|
||||
Related entries: [line protocol](/v2.0/reference/line-protocol/), [point](#point)
|
||||
Related entries: [line protocol](/v2.0/reference/syntax/line-protocol/), [point](#point)
|
||||
|
||||
### batch size
|
||||
|
||||
|
@ -136,7 +136,7 @@ Each record consists of one or more fields, separated by commas.
|
|||
CSV file format is not fully standardized.
|
||||
|
||||
InfluxData uses annotated CSV (comma-separated values) format to encode HTTP responses and results returned to the Flux csv.from() function.
|
||||
For more detail, see [Annotated CSV](/v2.0/reference/annotated-csv/).
|
||||
For more detail, see [Annotated CSV](/v2.0/reference/syntax/annotated-csv/).
|
||||
|
||||
<!-- enterprise
|
||||
### cardinality
|
||||
|
@ -521,7 +521,7 @@ The InfluxDB 2.0 user interface (UI) can be used to view log history and data.
|
|||
### Line protocol (LP)
|
||||
|
||||
The text based format for writing points to InfluxDB.
|
||||
See [line protocol](/v2.0/reference/line-protocol/).
|
||||
See [line protocol](/v2.0/reference/syntax/line-protocol/).
|
||||
|
||||
## M
|
||||
|
||||
|
@ -987,8 +987,7 @@ A data type that represents a single point in time with nanosecond precision.
|
|||
### time series data
|
||||
|
||||
Sequence of data points typically consisting of successive measurements made from the same source over a time interval.
|
||||
Time series data shows how data evolves over
|
||||
time.
|
||||
Time series data shows how data evolves over time.
|
||||
On a time series data graph, one of the axes is always time.
|
||||
Time series data may be regular or irregular.
|
||||
Regular time series data changes in constant intervals.
|
||||
|
@ -999,7 +998,7 @@ Irregular time series data changes at non-constant intervals.
|
|||
The date and time associated with a point.
|
||||
Time in InfluxDB is in UTC.
|
||||
|
||||
To specify time when writing data, see [Elements of line protocol](/v2.0/reference/line-protocol/#elements-of-line-protocol).
|
||||
To specify time when writing data, see [Elements of line protocol](/v2.0/reference/syntax/line-protocol/#elements-of-line-protocol).
|
||||
To specify time when querying data, see [Query InfluxDB with Flux](/v2.0/query-data/get-started/query-influxdb/#2-specify-a-time-range).
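For example, a minimal Flux range clause that restricts a query to the last hour (the bucket name is a placeholder):

```js
from(bucket: "example-bucket")
  |> range(start: -1h)
```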
|
||||
|
||||
Related entries: [point](#point)
|
||||
|
|
|
@ -18,7 +18,7 @@ InfluxDB 2.0 uses the following columnar table structure to store data:
|
|||
- **Data rows:** all rows that contain time series data. For details about the type of data stored in InfluxDB, see [InfluxDB data elements](/v2.0/reference/key-concepts/data-elements/).
|
||||
- **Group keys** determine the contents of output tables in Flux by grouping records that share common values in specified columns. Learn more about [grouping your data with Flux](/v2.0/query-data/guides/group-data/).
|
||||
|
||||
For specifications on the InfluxDB 2.0 table structure, see [Tables](/v2.0/reference/annotated-csv/#tables).
|
||||
For specifications on the InfluxDB 2.0 table structure, see [Tables](/v2.0/reference/syntax/annotated-csv/#tables).
|
||||
|
||||
**_Tip:_** To visualize your table structure in the InfluxDB user interface, click the **Data Explorer** icon
|
||||
in the sidebar, create a query, click **Submit**, and then select **View Raw Data**.
|
||||
|
|
|
@ -11,11 +11,47 @@ aliases:
|
|||
---
|
||||
|
||||
{{% note %}}
|
||||
_The latest release of InfluxDB v2.0 alpha includes **Flux v0.49.0**.
|
||||
_The latest release of InfluxDB v2.0 alpha includes **Flux v0.52.0**.
|
||||
Though newer versions of Flux may be available, they will not be included with
|
||||
InfluxDB until the next InfluxDB v2.0 release._
|
||||
{{% /note %}}
|
||||
|
||||
## v0.52.0 [2019-10-30]
|
||||
|
||||
### Features
|
||||
- `Visitor` uses `Rc` for nodes.
|
||||
- Add `EvalOptions`.
|
||||
|
||||
### Bug fixes
|
||||
- Correctly lex `µs`.
|
||||
|
||||
---
|
||||
|
||||
## v0.51.0 [2019-10-24]
|
||||
|
||||
### Breaking changes
|
||||
- Update the Flux SPEC to remove duration addition and subtraction.
|
||||
- Turn duration value into a vector.
|
||||
|
||||
### Features
|
||||
- Implementations for type substitutions and constraints.
|
||||
- Add semantic analysis.
|
||||
- Updated the duration value to include months and negative flag.
|
||||
- Create a flatbuffers schema for AST.
|
||||
- Add initial C binding for parsing an AST.
|
||||
- Create a tool for updating `.flux` tests in-place.
|
||||
- Add walk implementation.
|
||||
- Turn duration value into a vector.
|
||||
- Define initial Flux data types.
|
||||
|
||||
### Bug fixes
|
||||
- Update libflux parser to match the Go parser.
|
||||
- Allow data collected by `prometheus.scrape()` to be used by `histogramQuantile()`.
|
||||
- Remove mock allocator.
|
||||
- Validate url for `sql.from()`, `sql.to()`, and `socket.from()`.
|
||||
|
||||
---
|
||||
|
||||
## v0.50.2 [2019-10-24]
|
||||
|
||||
### Bug fixes
|
||||
|
|
|
@ -7,6 +7,31 @@ menu:
|
|||
parent: Release notes
|
||||
weight: 101
|
||||
---
|
||||
## v2.0.0-alpha.19 [2019-10-30]
|
||||
|
||||
### Features
|
||||
- Add shortcut for toggling comments and submitting in Script Editor.
|
||||
|
||||
### UI Improvements
|
||||
- Redesign page headers to be more space-efficient.
|
||||
- Add 403 handler that redirects back to the sign-in page on oats-generated routes.
|
||||
|
||||
### Bug Fixes
|
||||
- Ensure users are created with an active status.
|
||||
- Added missing string values for `CacheStatus` type.
|
||||
- Disable saving for threshold check if no threshold selected.
|
||||
- Query variable selector shows variable keys, not values.
|
||||
- Create Label overlay disables the submit button and returns a UI error if name field is empty.
|
||||
- Log error as info message on unauthorized API call attempts.
|
||||
- Ensure `members` and `owners` endpoints lead to 404 when organization resource does not exist.
|
||||
- Telegraf UI filter functionality shows results based on input name.
|
||||
- Fix Telegraf UI sort functionality.
|
||||
- Fix task UI sort functionality.
|
||||
- Exiting a configuration of a dashboard cell properly renders the cell content.
|
||||
- Newly created checks appear on the checklist.
|
||||
- Changed task runs success status code from 200 to 201 to match Swagger documentation.
|
||||
- Text areas have the correct height.
|
||||
|
||||
## v2.0.0-alpha.18 [2019-09-26]
|
||||
|
||||
### Features
|
||||
|
|
|
@ -0,0 +1,18 @@
---
title: InfluxDB syntaxes
description: >
  InfluxDB uses a handful of languages and syntaxes to perform tasks such as
  writing, querying, processing, and deleting data.
weight: 5
menu:
  v2_0_ref:
    name: Syntax
v2.0/tags: [syntax]
---

InfluxDB uses a handful of languages and syntaxes to perform tasks such as
writing, querying, processing, and deleting data.
The following articles provide information about the different syntaxes used with
InfluxDB and the contexts in which they're used:

{{< children >}}
|
@ -1,16 +1,25 @@
|
|||
---
|
||||
title: Annotated CSV syntax
|
||||
list_title: Annotated CSV
|
||||
description: >
|
||||
Annotated CSV format is used to encode HTTP responses and results returned to the Flux `csv.from()` function.
|
||||
weight: 6
|
||||
Flux returns raw results in Annotated CSV format and also reads Annotated CSV
|
||||
using the `csv.from()` function.
|
||||
weight: 103
|
||||
menu:
|
||||
v2_0_ref:
|
||||
parent: Syntax
|
||||
name: Annotated CSV
|
||||
v2.0/tags: [csv, syntax]
|
||||
aliases:
|
||||
- /v2.0/reference/annotated-csv/
|
||||
---
|
||||
|
||||
Annotated CSV (comma-separated values) format is used to encode HTTP responses and results returned to the Flux [`csv.from()` function](https://v2.docs.influxdata.com/v2.0/reference/flux/stdlib/csv/from/).
|
||||
Flux returns raw results in Annotated CSV format and also reads Annotated CSV
|
||||
using the [`csv.from()` function](/v2.0/reference/flux/stdlib/csv/from/).
|
||||
|
||||
CSV tables must be encoded in UTF-8 and Unicode Normal Form C as defined in [UAX15](http://www.unicode.org/reports/tr15/). Line endings must be CRLF (Carriage Return Line Feed) as defined by the `text/csv` MIME type in [RFC 4180](https://tools.ietf.org/html/rfc4180).
|
||||
CSV tables must be encoded in UTF-8 and Unicode Normal Form C as defined in [UAX15](http://www.unicode.org/reports/tr15/).
|
||||
Line endings must be CRLF (Carriage Return Line Feed) as defined by the `text/csv`
|
||||
MIME type in [RFC 4180](https://tools.ietf.org/html/rfc4180).
|
||||
|
||||
## Examples
|
||||
|
||||
|
@ -71,7 +80,10 @@ my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,
|
|||
|
||||
In addition to the data columns, a table may include the following columns:
|
||||
|
||||
- **Annotation column**: Only used in annotation rows. Always the first column. Displays the name of an annotation. Value can be empty or a supported [annotation](#annotations). You'll notice a space for this column for the entire length of the table, so rows appear to start with `,`.
|
||||
- **Annotation column**: Only used in annotation rows. Always the first column.
|
||||
Displays the name of an annotation. Value can be empty or a supported [annotation](#annotations).
|
||||
You'll notice a space for this column for the entire length of the table,
|
||||
so rows appear to start with `,`.
|
||||
|
||||
- **Result column**: Contains the name of the result specified by the query.
|
||||
|
||||
|
@ -130,23 +142,25 @@ my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,
|
|||
|
||||
Flux supports the following dialect options for `text/csv` format.
|
||||
|
||||
| Option | Description| Default |
|
||||
| :-------- | :--------- | :-------|
|
||||
| **header** | If true, the header row is included.| `true`|
|
||||
| **delimiter** | Character used to delimit columns. | `,`|
|
||||
| **quoteChar** | Character used to quote values containing the delimiter. |`"`|
|
||||
| **annotations** | List of annotations to encode (datatype, group, or default). |`empty`|
|
||||
| **commentPrefix** | String prefix to identify a comment. Always added to annotations. |`#`|
|
||||
| Option | Description | Default |
|
||||
| :-------- | :--------- |:------- |
|
||||
| **header** | If true, the header row is included. | `true` |
|
||||
| **delimiter** | Character used to delimit columns. | `,` |
|
||||
| **quoteChar** | Character used to quote values containing the delimiter. | `"` |
|
||||
| **annotations** | List of annotations to encode (datatype, group, or default). | `empty` |
|
||||
| **commentPrefix** | String prefix to identify a comment. Always added to annotations. | `#` |
|
||||
|
||||
### Annotations
|
||||
|
||||
Annotation rows describe column properties, and start with `#` (or commentPrefix value). The first column in an annotation row always contains the annotation name. Subsequent columns contain annotation values as shown in the table below.
|
||||
Annotation rows describe column properties, and start with `#` (or commentPrefix value).
|
||||
The first column in an annotation row always contains the annotation name.
|
||||
Subsequent columns contain annotation values as shown in the table below.
|
||||
|
||||
|Annotation name | Values| Description |
|
||||
| :-------- | :--------- | :-------|
|
||||
| **datatype** | a [valid data type](#valid-data-types) | Describes the type of data. |
|
||||
| **group** | boolean flag `true` or `false` | Indicates the column is part of the group key.|
|
||||
| **default** | a [valid data type](#valid-data-types) |Value to use for rows with an empty string value.|
|
||||
| Annotation name | Values | Description |
|
||||
|:-------- |:--------- | :------- |
|
||||
| **datatype** | a [valid data type](#valid-data-types) | Describes the type of data. |
|
||||
| **group** | boolean flag `true` or `false` | Indicates the column is part of the group key. |
|
||||
| **default** | a [valid data type](#valid-data-types) | Value to use for rows with an empty string value. |
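For instance, a minimal sketch of the three annotations applied to a small result (column names and values are illustrative only):

```
#datatype,string,long,dateTime:RFC3339,double
#group,false,false,false,false
#default,_result,,,
,result,table,_time,_value
,,0,2018-05-08T20:50:40Z,15.43
```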
|
||||
|
||||
{{% note %}}
|
||||
To encode a table with its group key, the `datatype`, `group`, and `default` annotations must be included. If a table has no rows, the `default` annotation provides the group key values.
|
||||
|
@ -174,16 +188,16 @@ csv.from(csv:a) |> yield()
|
|||
|
||||
### Valid data types
|
||||
|
||||
| Datatype | Flux type | Description |
|
||||
| :-------- | :--------- | :-----------------------------------------------------------------------------|
|
||||
| boolean | bool | a truth value, one of "true" or "false" |
|
||||
| unsignedLong | uint | an unsigned 64-bit integer |
|
||||
| long | int | a signed 64-bit integer |
|
||||
| double | float | an IEEE-754 64-bit floating-point number |
|
||||
| string | string | a UTF-8 encoded string |
|
||||
| base64Binary | bytes | a base64 encoded sequence of bytes as defined in RFC 4648 |
|
||||
| dateTime | time | an instant in time, may be followed with a colon : and a description of the format |
|
||||
| duration | duration | a length of time represented as an unsigned 64-bit integer number of nanoseconds |
|
||||
| Datatype | Flux type | Description |
|
||||
| :-------- | :--------- | :---------- |
|
||||
| boolean | bool | a truth value, one of "true" or "false" |
|
||||
| unsignedLong | uint | an unsigned 64-bit integer |
|
||||
| long | int | a signed 64-bit integer |
|
||||
| double | float | an IEEE-754 64-bit floating-point number |
|
||||
| string | string | a UTF-8 encoded string |
|
||||
| base64Binary | bytes | a base64 encoded sequence of bytes as defined in RFC 4648 |
|
||||
| dateTime | time | an instant in time, may be followed with a colon : and a description of the format |
|
||||
| duration | duration | a length of time represented as an unsigned 64-bit integer number of nanoseconds |
|
||||
|
||||
## Errors
|
||||
|
|
@ -0,0 +1,81 @@
---
title: Delete predicate syntax
list_title: Delete predicate
description: >
  The InfluxDB `/delete` endpoint uses an InfluxQL-like predicate syntax to determine
  what data points to delete.
menu:
  v2_0_ref:
    parent: Syntax
    name: Delete predicate
weight: 104
v2.0/tags: [syntax, delete]
related:
  - /v2.0/reference/cli/influx/delete/
---

The InfluxDB `/delete` endpoint uses an InfluxQL-like predicate syntax to determine
what data [points](/v2.0/reference/glossary/#point) to delete.
InfluxDB uses the delete predicate to evaluate the [series keys](/v2.0/reference/glossary/#series-key)
of points in the time range specified in the delete request.
Points with series keys that evaluate to `true` for the given predicate are deleted.
Points with series keys that evaluate to `false` are preserved.

A delete predicate consists of one or more [predicate expressions](/v2.0/reference/glossary/#predicate-expression).
The left operand of the predicate expression is the column name.
The right operand is the column value.
Operands are compared using [comparison operators](#comparison-operators).
Use [logical operators](#logical-operators) to combine two or more predicate expressions.

##### Example delete predicate
```sql
key1="value1" AND key2="value"
```

{{% note %}}
Predicate expressions can use any column or tag except `_time` or `_value`.
{{% /note %}}

## Logical operators
Logical operators join two or more predicate expressions.

| Operator | Description |
|:-------- |:----------- |
| `AND`    | Both left and right operands must be `true` for the expression to be `true`. |

## Comparison operators
Comparison operators compare left and right operands and return `true` or `false`.

| Operator | Description | Example       | Result |
|:-------- |:----------- |:-------------:|:------:|
| `=`      | Equal to    | `"abc"="abc"` | `true` |

## Delete predicate examples

### Delete points with a specific measurement
The following predicate deletes points in the `sensorData` measurement:

```sql
_measurement="sensorData"
```

### Delete points with a specific field
The following predicate deletes points with the `temperature` field:

```sql
_field="temperature"
```

### Delete points with a specific tag set
The following predicate deletes points from the `prod-1.4` host in the `us-west` region:

```sql
host="prod-1.4" AND region="us-west"
```
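To apply a predicate like this one, pass it to the delete endpoint or to the `influx delete` command linked above; a hedged sketch (flag names are assumed from that CLI reference, and all values are placeholders):

```sh
influx delete \
  --org my-org \
  --bucket my-bucket \
  --start 2019-10-01T00:00:00Z \
  --stop 2019-11-01T00:00:00Z \
  --predicate 'host="prod-1.4" AND region="us-west"'
```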

## Limitations
The delete predicate syntax has the following limitations.

- Delete predicates do not support regular expressions.
- Delete predicates do not support the `OR` logical operator.
- Delete predicates only support equality (`=`), not inequality (`!=`).
- Delete predicates can use any column or tag except `_time` or `_value`.

@ -0,0 +1,38 @@
---
title: Flux syntax
list_title: Flux
description: >
  Flux is a functional data scripting language designed for querying, analyzing, and acting on data.
menu:
  v2_0_ref:
    parent: Syntax
    name: Flux
    identifier: flux-syntax
weight: 101
v2.0/tags: [syntax, flux]
---

Flux is a functional data scripting language designed for querying, analyzing, and acting on data.

## Flux design principles
Flux takes a functional approach to data exploration and processing, but is designed
to be usable, readable, flexible, composable, testable, contributable, and shareable.

The following example returns the average CPU usage per minute over the last hour.

```js
from(bucket:"example-bucket")
  |> range(start:-1h)
  |> filter(fn:(r) =>
    r._measurement == "cpu" and
    r.cpu == "cpu-total"
  )
  |> aggregateWindow(every: 1m, fn: mean)
```

## Flux documentation
For more information about Flux syntax, packages, and functions, see:

- [Get started with Flux](/v2.0/reference/flux/)
- [Flux standard library](/v2.0/reference/flux/stdlib/)
- [Flux language specification](/v2.0/reference/flux/language/)

|
@ -1,13 +1,17 @@
|
|||
---
|
||||
title: Line protocol reference
|
||||
list_title: Line protocol
|
||||
description: >
|
||||
InfluxDB uses line protocol to write data points.
|
||||
It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point.
|
||||
menu:
|
||||
v2_0_ref:
|
||||
parent: Syntax
|
||||
name: Line protocol
|
||||
weight: 6
|
||||
v2.0/tags: [write, line protocol]
|
||||
weight: 102
|
||||
v2.0/tags: [write, line protocol, syntax]
|
||||
aliases:
|
||||
- /v2.0/reference/line-protocol
|
||||
---
|
||||
|
||||
InfluxDB uses line protocol to write data points.
|
||||
|
@ -102,7 +106,7 @@ include the timestamp.
|
|||
|
||||
{{% note %}}
|
||||
_Use the default nanosecond precision timestamp or specify an alternative precision
|
||||
when [writing the data](/v2.0/write-data/#precision)._
|
||||
when [writing the data](/v2.0/write-data/#timestamp-precision)._
|
||||
{{% /note %}}
|
||||
|
||||
### Whitespace
|
|
@ -0,0 +1,160 @@
|
|||
---
|
||||
title: Enable TLS encryption
|
||||
seotitle: Enable TLS/SSL encryption
|
||||
description: >
|
||||
Enable Transport Layer Security (TLS) and use the HTTPS protocol to secure communication between clients and InfluxDB.
|
||||
weight: 101
|
||||
menu:
|
||||
v2_0:
|
||||
parent: Security & authorization
|
||||
v2.0/tags: [security, authentication, tls, https, ssl]
|
||||
---
|
||||
|
||||
Enabling HTTPS encrypts the communication between clients and the InfluxDB server.
|
||||
When configured with a signed certificate, HTTPS can also verify the authenticity of the InfluxDB server to connecting clients.
|
||||
|
||||
{{% warn %}}
|
||||
InfluxData [strongly recommends](/influxdb/v1.7/administration/security/) enabling HTTPS, especially if you plan on sending requests to InfluxDB over a network.
|
||||
{{% /warn %}}
|
||||
|
||||
## Requirements
|
||||
|
||||
To enable HTTPS with InfluxDB, you need a Transport Layer Security (TLS) certificate (also known as a Secured Sockets Layer (SSL) certificate).
|
||||
InfluxDB supports three types of TLS certificates:
|
||||
|
||||
### Single domain certificates signed by a Certificate Authority
|
||||
|
||||
Single domain certificates provide cryptographic security to HTTPS requests and allow clients to verify the identity of the InfluxDB server.
|
||||
These certificates are signed and issued by a trusted, third-party Certificate Authority (CA).
|
||||
With this certificate option, every InfluxDB instance requires a unique single domain certificate.
|
||||
|
||||
### Wildcard certificates signed by a Certificate Authority
|
||||
|
||||
Wildcard certificates provide cryptographic security to HTTPS requests and allow clients to verify the identity of the InfluxDB server.
|
||||
Wildcard certificates can be used across multiple InfluxDB instances on different servers.
|
||||
|
||||
### Self-signed certificates
|
||||
|
||||
Self-signed certificates are _not_ signed by a trusted, third-party CA.
|
||||
Unlike CA-signed certificates, self-signed certificates only provide cryptographic security to HTTPS requests.
|
||||
They do not allow clients to verify the identity of the InfluxDB server.
|
||||
With this certificate option, every InfluxDB instance requires a unique self-signed certificate.
|
||||
You can generate a self-signed certificate on your own machine.
|
||||
|
||||
<!-- InfluxDB supports certificates composed of a private key file (`.key`) and a signed certificate file (`.crt`) file pair, -->
|
||||
<!-- as well as certificates that combine the private key file and the signed certificate file into a single bundled file (`.pem`). -->
|
||||
|
||||
## Enable HTTPS with a CA-signed certificate
|
||||
|
||||
1. **Install the certificate**
|
||||
|
||||
Place the private key file (`.key`) and the signed certificate file (`.crt`) in the `/etc/ssl/` directory.
|
||||
(Other paths will also work.)
|
||||
|
||||
2. **Set certificate file permissions**
|
||||
|
||||
The user running InfluxDB must have read permissions on the TLS certificate.
|
||||
|
||||
{{% note %}}You may opt to set up multiple users, groups, and permissions.
|
||||
Ultimately, make sure all users running InfluxDB have read permissions for the TLS certificate.
|
||||
{{% /note %}}
|
||||
|
||||
Run the following commands to set certificate file permissions so the user running InfluxDB can read them.
|
||||
|
||||
```bash
|
||||
sudo chmod 644 /etc/ssl/<CA-certificate-file>
|
||||
sudo chmod 600 /etc/ssl/<private-key-file>
|
||||
```
|
||||
|
||||
3. **Run `influxd` with TLS flags**
|
||||
|
||||
Start InfluxDB with TLS command line flags:
|
||||
|
||||
```bash
|
||||
influxd \
|
||||
--tls-cert "/etc/ssl/influxdb-selfsigned.crt" \
|
||||
--tls-key "/etc/ssl/influxdb-selfsigned.key"
|
||||
```
|
||||
|
||||
4. **Verify TLS connection**
|
||||
|
||||
Ensure you can connect over HTTPS by running
|
||||
|
||||
```
|
||||
curl -v https://influxdb:9999/api/v2/ping
|
||||
```
|
||||
|
||||
With this command, you should see output confirming a successful TLS handshake.
|
||||
|
||||
## Enable HTTPS with a self-signed certificate
|
||||
|
||||
1. **Generate a self-signed certificate**
|
||||
|
||||
Use the `openssl` utility (preinstalled on many OSes) to create a certificate.
|
||||
The following command generates a private key file (`.key`) and a self-signed
|
||||
certificate file (`.crt`) which remain valid for the specified `NUMBER_OF_DAYS`.
|
||||
It outputs those files to `/etc/ssl/` and gives them the required permissions.
|
||||
(Other paths will also work.)
|
||||
|
||||
```bash
|
||||
sudo openssl req -x509 -nodes -newkey rsa:2048 \
|
||||
-keyout /etc/ssl/influxdb-selfsigned.key \
|
||||
-out /etc/ssl/influxdb-selfsigned.crt \
|
||||
-days <NUMBER_OF_DAYS>
|
||||
```
|
||||
|
||||
When you execute the command, it will prompt you for more information.
|
||||
You can choose to fill out that information or leave it blank; both actions generate valid certificate files.
|
||||
|
||||
2. **Run `influxd` with TLS flags**
|
||||
|
||||
Start InfluxDB with TLS command line flags:
|
||||
|
||||
```bash
|
||||
influxd \
|
||||
--tls-cert "/etc/ssl/influxdb-selfsigned.crt" \
|
||||
--tls-key "/etc/ssl/influxdb-selfsigned.key"
|
||||
```
|
||||
|
||||
3. **Verify TLS connection**
|
||||
|
||||
Ensure you can connect over HTTPS by running
|
||||
|
||||
```
|
||||
curl -vk https://influxdb:9999/api/v2/ping
|
||||
```
|
||||
|
||||
With this command, you should see output confirming a successful TLS handshake.
|
||||
|
||||
## Connect Telegraf to a secured InfluxDB instance
|
||||
|
||||
To connect [Telegraf](/telegraf/latest/) to an InfluxDB 2.0 instance with TLS enabled,
|
||||
update the following `influxdb_v2` output settings in your Telegraf configuration file:
|
||||
|
||||
- Update `urls` to use `https` instead of `http`.
|
||||
- If using a self-signed certificate, uncomment and set `insecure_skip_verify` to true.
|
||||
|
||||
### Example configuration
|
||||
|
||||
```toml
|
||||
###############################################################################
|
||||
# OUTPUT PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
# Configuration for sending metrics to InfluxDB
|
||||
[[outputs.influxdb_v2]]
|
||||
## The URLs of the InfluxDB cluster nodes.
|
||||
##
|
||||
## Multiple URLs can be specified for a single cluster, only ONE of the
|
||||
## urls will be written to each interval.
|
||||
urls = ["https://127.0.0.1:9999"]
|
||||
|
||||
[...]
|
||||
|
||||
## Optional TLS Config for use on HTTP connections.
|
||||
[...]
|
||||
## Use TLS but skip chain & host verification
|
||||
insecure_skip_verify = true
|
||||
```
|
||||
|
||||
Restart Telegraf using the updated configuration file.
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Store secrets in Vault
|
||||
description: Manage secrets in InfluxDB using the InfluxDB UI or the influx CLI.
|
||||
description: Use Vault as an InfluxDB secret store and manage secrets through the InfluxDB API.
|
||||
v2.0/tags: [secrets, security]
|
||||
menu:
|
||||
v2_0:
|
||||
|
@ -41,32 +41,52 @@ For this example, install Vault on your local machine and start a Vault dev serv
|
|||
vault server -dev
|
||||
```
|
||||
|
||||
## Define Vault environment variables
|
||||
## Provide Vault server address and token
|
||||
|
||||
Use [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables)
|
||||
Use `influxd` Vault-related flags or [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables)
|
||||
to provide connection credentials and other important Vault-related information to InfluxDB.
|
||||
|
||||
#### Required environment variables
|
||||
### Required credentials
|
||||
|
||||
- `VAULT_ADDR`: The API address of your Vault server _(provided in the Vault server output)_.
|
||||
- `VAULT_TOKEN`: The [Vault token](https://learn.hashicorp.com/vault/getting-started/authentication)
|
||||
required to access your Vault server.
|
||||
#### Vault address
|
||||
Provide the API address of your Vault server _(available in the Vault server output)_
|
||||
using the [`--vault-addr` flag](/v2.0/reference/config-options/#vault-addr) when
|
||||
starting `influxd` or with the `VAULT_ADDR` environment variable.
|
||||
|
||||
_Your Vault server configuration may require other environment variables._
|
||||
#### Vault token
|
||||
Provide your [Vault token](https://learn.hashicorp.com/vault/getting-started/authentication)
|
||||
(required to access your Vault server) using the [`--vault-token` flag](/v2.0/reference/config-options/#vault-token)
|
||||
when starting `influxd` or with the `VAULT_TOKEN` environment variable.
|
||||
|
||||
```sh
|
||||
export VAULT_ADDR='http://127.0.0.1:8200' VAULT_TOKEN='s.0X0XxXXx0xXxXXxxxXxXxX0x'
|
||||
```
|
||||
_Your Vault server configuration may require other Vault settings._
|
||||
|
||||
## Start InfluxDB
|
||||
|
||||
Start the [`influxd` service](/v2.0/reference/cli/influxd/) with the `--secret-store`
|
||||
option set to `vault`.
|
||||
option set to `vault` and any other necessary flags.
|
||||
|
||||
```bash
|
||||
influxd --secret-store vault
|
||||
influxd --secret-store vault \
|
||||
--vault-addr=http://127.0.0.1:8200 \
|
||||
--vault-token=s.0X0XxXXx0xXxXXxxxXxXxX0x
|
||||
```
|
||||
|
||||
## Manage tokens through the InfluxDB API
|
||||
`influxd` includes the following Vault configuration options.
|
||||
If set, these flags override any [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables):
|
||||
|
||||
- `--vault-addr`
|
||||
- `--vault-cacert`
|
||||
- `--vault-capath`
|
||||
- `--vault-client-cert`
|
||||
- `--vault-client-key`
|
||||
- `--vault-max-retries`
|
||||
- `--vault-client-timeout`
|
||||
- `--vault-skip-verify`
|
||||
- `--vault-tls-server-name`
|
||||
- `--vault-token`
|
||||
|
||||
For more information, see [InfluxDB configuration options](/v2.0/reference/config-options/).
|
||||
|
||||
## Manage secrets through the InfluxDB API
|
||||
Use the InfluxDB `/org/{orgID}/secrets` API endpoint to add secrets to Vault.
|
||||
For details, see [Manage secrets](/v2.0/security/secrets/manage-secrets/).
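As a rough sketch of what that looks like (the full path, method, and payload shape are assumptions here; see the Manage secrets guide for the authoritative request):

```sh
# Add or update a secret for an organization (placeholder IDs and token).
curl -XPATCH http://localhost:9999/api/v2/orgs/<org-id>/secrets \
  -H 'Authorization: Token <api-token>' \
  -H 'Content-Type: application/json' \
  -d '{"apikey": "example-secret-value"}'
```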
|
||||
|
|
|
@ -9,7 +9,7 @@ menu:
|
|||
v2_0:
|
||||
name: Manage tokens
|
||||
parent: Security & authorization
|
||||
weight: 101
|
||||
weight: 103
|
||||
---
|
||||
|
||||
InfluxDB ensures secure interaction between users and data through the use of **authentication tokens**.
|
||||
|
|
|
@ -35,7 +35,7 @@ See [Get started with Flux](/v2.0/query-data/get-started) to learn more about Fl
|
|||
To switch back to the query builder, click **Query Builder**. Note that your updates from the Script Editor will not be saved.
|
||||
3. Use the **Functions** list to review the available Flux functions.
|
||||
Click on a function from the list to add it to your query.
|
||||
4. Click **Submit** to run your query. You can then preview your graph in the above pane.
|
||||
4. Click **Submit** (or press `Control+Enter`) to run your query. You can then preview your graph in the above pane.
|
||||
5. To work on multiple queries at once, click the {{< icon "plus" >}} to add another tab.
|
||||
* Click the eye icon on a tab to hide or show a query's visualization.
|
||||
* Click on the name of the query in the tab to rename it.
|
||||
|
@ -89,6 +89,10 @@ The default time range is 5m.
|
|||
|
||||
Click **Query Builder** to use the builder to create a Flux query. Click **Script Editor** to manually edit the query.
|
||||
|
||||
#### Add comments to your script
|
||||
|
||||
In **Script Editor** mode, press `Control+/` to comment or uncomment the current line.
|
||||
|
||||
## Save your query as a dashboard cell or task
|
||||
|
||||
**To save your query**:
|
||||
|
|
|
@ -11,7 +11,7 @@ menu:
|
|||
v2.0/tags: [write, line protocol]
|
||||
---
|
||||
|
||||
Collect and write time series data to InfluxDB using [line protocol](/v2.0/reference/line-protocol),
|
||||
Collect and write time series data to InfluxDB using [line protocol](/v2.0/reference/syntax/line-protocol),
|
||||
Telegraf, data scrapers, the InfluxDB v2 API, `influx` command line interface (CLI),
|
||||
the InfluxDB user interface (UI), and client libraries.
|
||||
|
||||
|
@ -40,9 +40,9 @@ The [InfluxDB setup process](/v2.0/get-started/#set-up-influxdb) creates each of
|
|||
|
||||
Use _line protocol_ format to write data into InfluxDB.
|
||||
Each line represents a data point.
|
||||
Each point requires a [*measurement*](/v2.0/reference/line-protocol/#measurement)
|
||||
and [*field set*](/v2.0/reference/line-protocol/#field-set) and may also include
|
||||
a [*tag set*](/v2.0/reference/line-protocol/#tag-set) and a [*timestamp*](/v2.0/reference/line-protocol/#timestamp).
|
||||
Each point requires a [*measurement*](/v2.0/reference/syntax/line-protocol/#measurement)
|
||||
and [*field set*](/v2.0/reference/syntax/line-protocol/#field-set) and may also include
|
||||
a [*tag set*](/v2.0/reference/syntax/line-protocol/#tag-set) and a [*timestamp*](/v2.0/reference/syntax/line-protocol/#timestamp).
|
||||
|
||||
Line protocol data looks like this:
|
||||
|
||||
|
@ -52,12 +52,14 @@ cpu,host=host1 usage_user=3.8234,usage_system=4.23874 1556892726597397000
|
|||
mem,host=host1 used_percent=21.83599203 1556892777007291000
|
||||
```
|
||||
|
||||
#### Timestamp precision
|
||||
Timestamps are essential in InfluxDB.
|
||||
If a data point does not include a timestamp when it is received by the database, InfluxDB uses the current system time (UTC) of its host machine.
|
||||
If a data point does not include a timestamp when it is received by the database,
|
||||
InfluxDB uses the current system time (UTC) of its host machine.
|
||||
|
||||
The default precision for timestamps is in nanoseconds.
|
||||
If the precision of the timestamps is anything other than nanoseconds (`ns`),
|
||||
you must specify the precision in your write request.
|
||||
you must specify the precision in your [write request](#ways-to-write-data-into-influxdb).
|
||||
InfluxDB accepts the following precisions:
|
||||
|
||||
- `ns` - Nanoseconds
|
||||
|
@ -65,7 +67,7 @@ InfluxDB accepts the following precisions:
|
|||
- `ms` - Milliseconds
|
||||
- `s` - Seconds
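For example, to write a point whose timestamp is in seconds, send it to the `/api/v2/write` endpoint with `precision=s` (host, org, bucket, and token values are placeholders):

```sh
curl -XPOST "http://localhost:9999/api/v2/write?org=my-org&bucket=my-bucket&precision=s" \
  -H 'Authorization: Token my-token' \
  --data-raw "mem,host=host1 used_percent=21.83599203 1556892777"
```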
|
||||
|
||||
_For more details about line protocol, see the [Line protocol reference](/v2.0/reference/line-protocol) and [Best practices for writing data](/v2.0/write-data/best-practices/)._
|
||||
_For more details about line protocol, see the [Line protocol reference](/v2.0/reference/syntax/line-protocol) and [Best practices for writing data](/v2.0/write-data/best-practices/)._
|
||||
|
||||
## Ways to write data into InfluxDB
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ v2.0/tags: [best practices, write]
|
|||
---
|
||||
|
||||
InfluxDB identifies unique data points by their measurement, tag set, and timestamp
|
||||
(each a part of [Line protocol](/v2.0/reference/line-protocol) used to write data to InfluxDB).
|
||||
(each a part of [Line protocol](/v2.0/reference/syntax/line-protocol) used to write data to InfluxDB).
|
||||
|
||||
```txt
|
||||
web,host=host2,region=us_west firstByte=15.0 1559260800000000000
|
||||
|
|
|
@ -23,7 +23,7 @@ The following tools write to InfluxDB and employ write optimizations by default:
|
|||
|
||||
## Batch writes
|
||||
|
||||
Write data in batches to Minimize network overhead when writing data to InfluxDB.
|
||||
Write data in batches to minimize network overhead when writing data to InfluxDB.
|
||||
|
||||
{{% note %}}
|
||||
The optimal batch size is 5000 lines of line protocol.
|
||||
|
@ -44,12 +44,11 @@ measurement,tagA=i,tagB=think,tagC=therefore,tagD=i,tagE=am fieldKey=fieldValue
|
|||
|
||||
## Use the coarsest time precision possible
|
||||
|
||||
InfluxDB lets you write data in nanosecond precision, however if data isn't
|
||||
collected in nanoseconds, there is no need to write at that precision.
|
||||
Using the coarsest precision possible for timestamps can result in significant
|
||||
compression improvements.
|
||||
By default, InfluxDB writes data in nanosecond precision.
|
||||
However, if your data isn't collected in nanoseconds, there is no need to write at that precision.
|
||||
For better performance, use the coarsest precision possible for timestamps.
|
||||
|
||||
_Specify timestamp precision when [writing to InfluxDB](/v2.0/write-data/#precision)._
|
||||
_Specify timestamp precision when [writing to InfluxDB](/v2.0/write-data/#timestamp-precision)._
|
||||
|
||||
## Synchronize hosts with NTP
|
||||
|
||||
|
|