diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1dd82d7b1..f3bf2c1a3 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -4,7 +4,7 @@ jobs:
docker:
- image: circleci/node:latest
environment:
- HUGO_VERSION: "0.55.1"
+ HUGO_VERSION: "0.56.3"
S3DEPLOY_VERSION: "2.3.2"
steps:
- checkout
@@ -23,7 +23,10 @@ jobs:
command: ./deploy/ci-install-s3deploy.sh
- run:
name: Install NPM dependencies
- command: sudo npm i -g postcss-cli autoprefixer
+ command: sudo npm i -g postcss-cli autoprefixer redoc-cli
+ - run:
+ name: Generate API documentation
+ command: cd api-docs && bash generate-api-docs.sh
- save_cache:
key: install-v1-{{ checksum ".circleci/config.yml" }}
paths:
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..f94d686d9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,17 @@
+_Describe the issue here._
+
+##### Relevant URLs
+- _Provide relevant URLs_
+
+##### What products and version are you using?
+
+
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..855a5991f
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,8 @@
+Closes #
+
+_Describe your proposed changes here._
+
+- [ ] Signed the [InfluxData CLA](https://www.influxdata.com/legal/cla/)
+ ([if necessary](https://github.com/influxdata/docs-v2/blob/master/CONTRIBUTING.md#sign-the-influxdata-cla))
+- [ ] Tests pass (no build errors)
+- [ ] Rebased/mergeable
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
new file mode 100644
index 000000000..79d053066
--- /dev/null
+++ b/.github/SECURITY.md
@@ -0,0 +1,11 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+InfluxData takes security and our users' trust very seriously.
+If you believe you have found a security issue in any of our open source projects,
+please responsibly disclose it by contacting security@influxdata.com.
+More details about security vulnerability reporting, including our GPG key,
+can be found at https://www.influxdata.com/how-to-report-security-vulnerabilities/.
diff --git a/.gitignore b/.gitignore
index 07f198c85..b69d13c23 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ public
node_modules
*.log
/resources
+/content/**/api.html
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a4ead8b86..68b3605f5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,9 +3,12 @@
## Sign the InfluxData CLA
The InfluxData Contributor License Agreement (CLA) is part of the legal framework
for the open-source ecosystem that protects both you and InfluxData.
-In order to contribute to any InfluxData project, you must first sign the CLA.
+To make substantial contributions to InfluxData documentation, first sign the InfluxData CLA.
+What constitutes a "substantial" change is at the discretion of InfluxData documentation maintainers.
-[Sign the InfluxData (CLA)](https://www.influxdata.com/legal/cla/)
+[Sign the InfluxData CLA](https://www.influxdata.com/legal/cla/)
+
+_**Note:** Typo and broken link fixes are greatly appreciated and do not require signing the CLA._
## Make suggested updates
@@ -59,13 +62,16 @@ menu:
v2_0:
name: # Article name that only appears in the left nav
parent: # Specifies a parent group and nests navigation items
-weight: # Determines sort order in both the nav tree and in article lists.
+weight: # Determines sort order in both the nav tree and in article lists
draft: # If true, will not render page on build
enterprise_all: # If true, specifies the doc as a whole is specific to InfluxDB Enterprise
enterprise_some: # If true, specifies the doc includes some content specific to InfluxDB Enterprise
cloud_all: # If true, specifies the doc as a whole is specific to InfluxDB Cloud
cloud_some: # If true, specifies the doc includes some content specific to InfluxDB Cloud
v2.x/tags: # Tags specific to each version (replace .x" with the appropriate minor version )
+related: # Creates links to specific internal and external content at the bottom of the page
+ - /path/to/related/article
+ - https://external-link.com, This is an external link
```
#### Title usage
@@ -199,6 +205,17 @@ Insert Cloud-specific markdown content here.
{{% /cloud %}}
```
+#### InfluxDB Cloud content block
+The `{{ cloud-msg }}` shortcode creates a highlighted block of text specific to
+InfluxDB Cloud, meant to stand out from the rest of the article content.
+Its format is similar to note and warning blocks.
+
+```md
+{{% cloud-msg %}}
+Insert Cloud-specific markdown content here.
+{{% /cloud-msg %}}
+```
+
#### InfluxDB Cloud name
The name used to refer to InfluxData's cloud offering is subject to change.
To facilitate easy updates in the future, use the `cloud-name` short-code when
@@ -310,6 +327,20 @@ WHERE time > now() - 15m
{{< /code-tabs-wrapper >}}
~~~
+### Related content
+Use the `related` frontmatter to include links to specific articles at the bottom of an article.
+
+- If the page exists within this documentation, just include the path to the page.
+  The page title is detected automatically.
+- If the page exists outside of this documentation, include the full URL and a title for the link.
+ The link and title must be in that order and must be separated by a comma and a space.
+
+```yaml
+related:
+ - /v2.0/write-data/quick-start
+ - https://influxdata.com, This is an external link
+```
+
### High-resolution images
In many cases, screenshots included in the docs are taken from high-resolution (retina) screens.
Because of this, the actual pixel dimension is 2x larger than it needs to be and is rendered 2x bigger than it should be.
@@ -389,15 +420,20 @@ Below is a list of available icons (some are aliases):
- dashboard
- dashboards
- data-explorer
+- delete
- download
- duplicate
- edit
- expand
- export
+- eye
+- eye-closed
+- eye-open
- feedback
- fullscreen
- gear
- graph
+- hide
- influx
- influx-icon
- nav-admin
@@ -422,7 +458,11 @@ Below is a list of available icons (some are aliases):
- search
- settings
- tasks
+- toggle
+- trash
+- trashcan
- triangle
+- view
- wrench
- x
@@ -436,12 +476,15 @@ Provide a visual example of the the navigation item using the `nav-icon` shortco
The following case insensitive values are supported:
-- admin
-- data explorer, data-explorer
+- admin, influx
+- data-explorer, data explorer
- dashboards
- tasks
-- organizations, orgs
-- configuration, config
+- monitor, alerts, bell
+- cloud, usage
+- disks, load data, load-data
+- settings
+- feedback
### InfluxDB UI notification messages
In some cases, documentation references a notification message that appears in
@@ -491,6 +534,13 @@ menu:
### Image naming conventions
Save images using the following naming format: `version-context-description.png`. For example, `2-0-visualizations-line-graph.png` or `2-0-tasks-add-new.png`. Specify a version other than 2.0 only if the image is specific to that version.
+## InfluxDB API documentation
+InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full
+InfluxDB API documentation when documentation is deployed.
+Redoc generates HTML documentation using the InfluxDB `swagger.yml`.
+For more information about generating InfluxDB API documentation, see the
+[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme).
+
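+For example, during deployment the `v2.0` API docs are built with a `redoc-cli` command
+equivalent to the following (run from the `api-docs/` directory; see
+`api-docs/generate-api-docs.sh` for the full script):
+
+```sh
+redoc-cli bundle -t template.hbs \
+  --title="InfluxDB v2.0 API documentation" \
+  --options.sortPropsAlphabetically \
+  --options.menuToggle \
+  --options.hideHostname \
+  --templateOptions.version="v2.0" \
+  v2.0/swagger.yml
+```
+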
## New Versions of InfluxDB
Version bumps occur regularly in the documentation.
Each minor version has its own directory with unique content.
@@ -536,7 +586,10 @@ _This example assumes v2.0 is the most recent version and v2.1 is the new versio
latest_version: v2.1
```
-7. Commit the changes and push the new branch to Github.
+7. Copy the InfluxDB `swagger.yml` specific to the new version into the
+   new version directory in `/api-docs/` (see the example below).
+
+8. Commit the changes and push the new branch to GitHub.
These changes lay the foundation for the new version.
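+
+For example, when creating the v2.1 version directory (step 7), assuming the updated
+`swagger.yml` has already been obtained (the source path below is a placeholder):
+
+```sh
+mkdir api-docs/v2.1
+cp /path/to/new/swagger.yml api-docs/v2.1/swagger.yml
+```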
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..c8dcb2a9f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2019 InfluxData, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index 422d5004c..9a026fc29 100644
--- a/README.md
+++ b/README.md
@@ -1,38 +1,50 @@
+
+
+
+
# InfluxDB 2.0 Documentation
This repository contains the InfluxDB 2.x documentation published at [docs.influxdata.com](https://docs.influxdata.com).
## Contributing
-We welcome and encourage community contributions. For information about contributing to the InfluxData documentation, see [Contribution guidelines](CONTRIBUTING.md).
+We welcome and encourage community contributions.
+For information about contributing to the InfluxData documentation, see [Contribution guidelines](CONTRIBUTING.md).
-## Run the docs locally
-The InfluxData documentation uses [Hugo](https://gohugo.io/), a static site
-generator built in Go.
+## Reporting a Vulnerability
+InfluxData takes security and our users' trust very seriously.
+If you believe you have found a security issue in any of our open source projects,
+please responsibly disclose it by contacting security@influxdata.com.
+More details about security vulnerability reporting,
+including our GPG key, can be found at https://www.influxdata.com/how-to-report-security-vulnerabilities/.
-### Clone this repository
-[Clone this repository](https://help.github.com/articles/cloning-a-repository/)
-to your local machine.
+## Running the docs locally
-### Install Hugo
-See the Hugo documentation for information about how to
-[download and install Hugo](https://gohugo.io/getting-started/installing/).
+1. [**Clone this repository**](https://help.github.com/articles/cloning-a-repository/) to your local machine.
-### Install NodeJS & Asset Pipeline Tools
-This project uses tools written in NodeJS to build and process stylesheets and javascript.
-In order for assets to build correctly, [install NodeJS](https://nodejs.org/en/download/)
-and run the following command to install the necessary tools:
+2. **Install Hugo**
-```sh
-npm i -g postcss-cli autoprefixer
-```
+ The InfluxData documentation uses [Hugo](https://gohugo.io/), a static site generator built in Go.
+ See the Hugo documentation for information about how to [download and install Hugo](https://gohugo.io/getting-started/installing/).
-### Start the hugo server
-Hugo provides a local development server that generates the HTML pages, builds
-the static assets, and serves them at `localhost:1313`.
+3. **Install NodeJS & Asset Pipeline Tools**
-Start the hugo server with:
+   This project uses tools written in NodeJS to build and process stylesheets and JavaScript.
+ In order for assets to build correctly, [install NodeJS](https://nodejs.org/en/download/)
+ and run the following command to install the necessary tools:
-```bash
-hugo server
-```
+ ```
+ npm i -g postcss-cli autoprefixer
+ ```
-View the docs at [localhost:1313](http://localhost:1313).
+4. **Start the Hugo server**
+
+ Hugo provides a local development server that generates the HTML pages, builds
+ the static assets, and serves them at `localhost:1313`.
+
+ Start the Hugo server from the repository:
+
+ ```
+ $ cd docs-v2/
+ $ hugo server
+ ```
+
+ View the docs at [localhost:1313](http://localhost:1313).
diff --git a/api-docs/README.md b/api-docs/README.md
new file mode 100644
index 000000000..ec7a76e16
--- /dev/null
+++ b/api-docs/README.md
@@ -0,0 +1,37 @@
+## Generate InfluxDB API docs
+InfluxDB uses [Redoc](https://github.com/Redocly/redoc/) and
+[redoc-cli](https://github.com/Redocly/redoc/blob/master/cli/README.md) to generate
+API documentation from the InfluxDB `swagger.yml`.
+
+To minimize repo size, the generated API documentation HTML is gitignored and
+therefore not committed directly to the docs repo.
+The InfluxDB docs deployment process uses swagger files in the `api-docs` directory
+to generate version-specific API documentation.
+
+### Versioned swagger files
+Structure versioned swagger files using the following pattern:
+
+```
+api-docs/
+ ├── v2.0/
+ │ └── swagger.yml
+ ├── v2.1/
+ │ └── swagger.yml
+ ├── v2.2/
+ │ └── swagger.yml
+ └── etc...
+```
+
+### Generate API docs locally
+Because the API documentation HTML is gitignored, you must manually generate it
+to view the API docs locally.
+
+From the root of the docs repo, run:
+
+```sh
+# Install redoc-cli
+npm install -g redoc-cli
+
+# Generate the API docs
+cd api-docs && bash generate-api-docs.sh
+```
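+
+The script writes one HTML file per version into the Hugo content tree
+(for example, `content/v2.0/api.html`). Because these files are gitignored,
+they can be regenerated at any time:
+
+```sh
+# From the root of the docs repo, verify the generated output
+ls content/*/api.html
+```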
diff --git a/api-docs/generate-api-docs.sh b/api-docs/generate-api-docs.sh
new file mode 100644
index 000000000..1f80e8a21
--- /dev/null
+++ b/api-docs/generate-api-docs.sh
@@ -0,0 +1,42 @@
+#!/bin/bash -e
+
+# Get list of versions from directory names
+versions="$(ls -d -- */)"
+
+for version in $versions
+do
+ # Trim the trailing slash off the directory name
+ version="${version%/}"
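+  # Build the Hugo menu key for this version (e.g., "v2.0" becomes "v2_0_ref")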
+ menu="${version//./_}_ref"
+
+ # Generate the frontmatter
+ frontmatter="---
+title: InfluxDB $version API documentation
+description: >
+ The InfluxDB API provides a programmatic interface for interactions with InfluxDB $version.
+layout: api
+menu:
+ $menu:
+ parent: InfluxDB v2 API
+ name: View full API docs
+weight: 102
+---
+"
+
+ # Use Redoc to generate the API html
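+  # (redoc-cli bundle writes its output to ./redoc-static.html by default)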
+ redoc-cli bundle -t template.hbs \
+ --title="InfluxDB $version API documentation" \
+ --options.sortPropsAlphabetically \
+ --options.menuToggle \
+ --options.hideHostname \
+ --templateOptions.version="$version" \
+ $version/swagger.yml
+
+ # Create temp file with frontmatter and Redoc html
+ echo "$frontmatter" >> $version.tmp
+ cat redoc-static.html >> $version.tmp
+
+  # Remove redoc file and move the tmp file to its proper place
+ rm -f redoc-static.html
+ mv $version.tmp ../content/$version/api.html
+done
diff --git a/api-docs/template.hbs b/api-docs/template.hbs
new file mode 100644
index 000000000..b63535da2
--- /dev/null
+++ b/api-docs/template.hbs
@@ -0,0 +1,52 @@
+
+
+
+
+
+ {{title}}
+
+
+
+
+
+
+
+
+
+ {{#unless disableGoogleFont}}{{/unless}}
+ {{{redocHead}}}
+
+
+
+
+
+ {{{redocHTML}}}
+
+
+
+
diff --git a/api-docs/v2.0/swagger.yml b/api-docs/v2.0/swagger.yml
new file mode 100644
index 000000000..a74555930
--- /dev/null
+++ b/api-docs/v2.0/swagger.yml
@@ -0,0 +1,9851 @@
+openapi: "3.0.0"
+info:
+ title: Influx API Service
+ version: 0.1.0
+servers:
+ - url: /api/v2
+paths:
+ /signin:
+ post:
+ operationId: PostSignin
+ summary: Exchange basic auth credentials for session
+ security:
+ - BasicAuth: []
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '204':
+ description: Successfully authenticated
+ '401':
+ description: Unauthorized access
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '403':
+ description: user account is disabled
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unsuccessful authentication
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /signout:
+ post:
+ operationId: PostSignout
+ summary: Expire the current session
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '204':
+ description: Session successfully expired
+ '401':
+ description: Unauthorized access
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unsuccessful session expiry
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /:
+ get:
+ operationId: GetRoutes
+ summary: Map of all top level routes available
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ default:
+ description: All routes
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Routes"
+ /setup:
+ get:
+ operationId: GetSetup
+ tags:
+ - Setup
+ summary: Check if database has default user, org, bucket
+ description: Returns `true` if no default user, organization, or bucket has been created.
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '200':
+ description:
+ allowed true or false
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/IsOnboarding"
+ post:
+ operationId: PostSetup
+ tags:
+ - Setup
+ summary: Set up initial user, org and bucket
+ description: Post an onboarding request to set up initial user, org and bucket.
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Source to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/OnboardingRequest"
+ responses:
+ '201':
+ description: Created default user, bucket, org
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/OnboardingResponse"
+ /documents/templates:
+ get:
+ operationId: GetDocumentsTemplates
+ tags:
+ - Templates
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: org
+ description: Specifies the name of the organization of the template.
+ schema:
+ type: string
+ - in: query
+ name: orgID
+ description: Specifies the organization ID of the template.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: A list of template documents
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Documents"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostDocumentsTemplates
+ tags:
+ - Templates
+ summary: Create a template
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Template that will be created
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/DocumentCreate"
+ responses:
+ '201':
+ description: Template created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Document"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/documents/templates/{templateID}':
+ get:
+ operationId: GetDocumentsTemplatesID
+ tags:
+ - Templates
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: templateID
+ schema:
+ type: string
+ required: true
+ description: The template ID.
+ responses:
+ '200':
+ description: The template requested
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Document"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ put:
+ operationId: PutDocumentsTemplatesID
+ tags:
+ - Templates
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: templateID
+ schema:
+ type: string
+ required: true
+ description: The template ID.
+ requestBody:
+ description: Template that will be updated
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/DocumentUpdate"
+ responses:
+ '200':
+ description: The newly updated template
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Document"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteDocumentsTemplatesID
+ tags:
+ - Templates
+ summary: Delete a template
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: templateID
+ schema:
+ type: string
+ required: true
+ description: The template ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/documents/templates/{templateID}/labels':
+ get:
+ operationId: GetDocumentsTemplatesIDLabels
+ tags:
+ - Templates
+ summary: List all labels for a template
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: templateID
+ schema:
+ type: string
+ required: true
+ description: The template ID.
+ responses:
+ '200':
+ description: A list of all labels for a template
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostDocumentsTemplatesIDLabels
+ tags:
+ - Templates
+ summary: Add a label to a template
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: templateID
+ schema:
+ type: string
+ required: true
+ description: The template ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The label added to the template
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/documents/templates/{templateID}/labels/{labelID}':
+ delete:
+ operationId: DeleteDocumentsTemplatesIDLabelsID
+ tags:
+ - Templates
+ summary: Delete a label from a template
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: templateID
+ schema:
+ type: string
+ required: true
+ description: The template ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Template not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /telegrafs:
+ get:
+ operationId: GetTelegrafs
+ tags:
+ - Telegrafs
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: orgID
+ description: The organization ID the Telegraf config belongs to.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: A list of Telegraf configs
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Telegrafs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTelegrafs
+ tags:
+ - Telegrafs
+ summary: Create a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Telegraf config to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/TelegrafRequest"
+ responses:
+ '201':
+ description: Telegraf config created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Telegraf"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}':
+ get:
+ operationId: GetTelegrafsID
+ tags:
+ - Telegrafs
+ summary: Retrieve a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ - in: header
+ name: Accept
+ required: false
+ schema:
+ type: string
+ default: application/toml
+ enum:
+ - application/toml
+ - application/json
+ - application/octet-stream
+ responses:
+ '200':
+ description: Telegraf config details
+ content:
+ application/toml:
+ example: "[agent]\ninterval = \"10s\""
+ schema:
+ type: string
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Telegraf"
+ application/octet-stream:
+ example: "[agent]\ninterval = \"10s\""
+ schema:
+ type: string
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ put:
+ operationId: PutTelegrafsID
+ tags:
+ - Telegrafs
+ summary: Update a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ requestBody:
+ description: Telegraf config update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/TelegrafRequest"
+ responses:
+ '200':
+ description: An updated Telegraf config
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Telegraf"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteTelegrafsID
+ tags:
+ - Telegrafs
+ summary: Delete a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}/labels':
+ get:
+ operationId: GetTelegrafsIDLabels
+ tags:
+ - Telegrafs
+ summary: List all labels for a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ responses:
+ '200':
+ description: A list of all labels for a Telegraf config
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTelegrafsIDLabels
+ tags:
+ - Telegrafs
+ summary: Add a label to a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The label added to the Telegraf config
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}/labels/{labelID}':
+ delete:
+ operationId: DeleteTelegrafsIDLabelsID
+ tags:
+ - Telegrafs
+ summary: Delete a label from a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Telegraf config not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}/members':
+ get:
+ operationId: GetTelegrafsIDMembers
+ tags:
+ - Users
+ - Telegrafs
+ summary: List all users with member privileges for a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ responses:
+ '200':
+ description: A list of Telegraf config members
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMembers"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTelegrafsIDMembers
+ tags:
+ - Users
+ - Telegrafs
+ summary: Add a member to a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ requestBody:
+ description: User to add as member
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Member added to Telegraf config
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMember"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}/members/{userID}':
+ delete:
+ operationId: DeleteTelegrafsIDMembersID
+ tags:
+ - Users
+ - Telegrafs
+ summary: Remove a member from a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the member to remove.
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ responses:
+ '204':
+ description: Member removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}/owners':
+ get:
+ operationId: GetTelegrafsIDOwners
+ tags:
+ - Users
+ - Telegrafs
+ summary: List all owners of a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ responses:
+ '200':
+ description: A list of Telegraf config owners
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwners"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTelegrafsIDOwners
+ tags:
+ - Users
+ - Telegrafs
+ summary: Add an owner to a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ requestBody:
+ description: User to add as owner
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Telegraf config owner added
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwner"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/telegrafs/{telegrafID}/owners/{userID}':
+ delete:
+ operationId: DeleteTelegrafsIDOwnersID
+ tags:
+ - Users
+ - Telegrafs
+ summary: Remove an owner from a Telegraf config
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the owner to remove.
+ - in: path
+ name: telegrafID
+ schema:
+ type: string
+ required: true
+ description: The Telegraf config ID.
+ responses:
+ '204':
+ description: Owner removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /scrapers:
+ get:
+ operationId: GetScrapers
+ tags:
+ - ScraperTargets
+ summary: Get all scraper targets
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: name
+ description: Specifies the name of the scraper target.
+ schema:
+ type: string
+ - in: query
+ name: id
+ description: List of scraper target IDs to return. If both `id` and `owner` are specified, only `id` is used.
+ schema:
+ type: array
+ items:
+ type: string
+ - in: query
+ name: orgID
+ description: Specifies the organization ID of the scraper target.
+ schema:
+ type: string
+ - in: query
+ name: org
+ description: Specifies the organization name of the scraper target.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: All scraper targets
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ScraperTargetResponses"
+ post:
+ operationId: PostScrapers
+ summary: Create a scraper target
+ tags:
+ - ScraperTargets
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Scraper target to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ScraperTargetRequest"
+ responses:
+ '201':
+ description: Scraper target created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ScraperTargetResponse"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}':
+ get:
+ operationId: GetScrapersID
+ tags:
+ - ScraperTargets
+ summary: Get a scraper target by ID
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ required: true
+ schema:
+ type: string
+ description: The scraper target ID.
+ responses:
+ '200':
+ description: Scraper target updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ScraperTargetResponse"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteScrapersID
+ tags:
+ - ScraperTargets
+ summary: Delete a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ required: true
+ schema:
+ type: string
+ description: The scraper target ID.
+ responses:
+ '204':
+ description: Scraper target deleted
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchScrapersID
+ summary: Update a scraper target
+ tags:
+ - ScraperTargets
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ required: true
+ schema:
+ type: string
+ description: The scraper target ID.
+ requestBody:
+ description: Scraper target update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ScraperTargetRequest"
+ responses:
+ '200':
+ description: Scraper target updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ScraperTargetResponse"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}/labels':
+ get:
+ operationId: GetScrapersIDLabels
+ tags:
+ - ScraperTargets
+ summary: List all labels for a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ responses:
+ '200':
+ description: A list of all labels for a scraper target
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostScrapersIDLabels
+ tags:
+ - ScraperTargets
+ summary: Add a label to a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The newly added label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}/labels/{labelID}':
+ delete:
+ operationId: DeleteScrapersIDLabelsID
+ tags:
+ - ScraperTargets
+ summary: Delete a label from a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Scraper target not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchScrapersIDLabelsID
+ tags:
+ - ScraperTargets
+ summary: Update a label on a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID.
+ requestBody:
+ description: Label update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Label"
+ responses:
+ '200':
+ description: Updated successfully
+ '404':
+ description: Scraper target not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}/members':
+ get:
+ operationId: GetScrapersIDMembers
+ tags:
+ - Users
+ - ScraperTargets
+ summary: List all users with member privileges for a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ responses:
+ '200':
+ description: A list of scraper target members
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMembers"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostScrapersIDMembers
+ tags:
+ - Users
+ - ScraperTargets
+ summary: Add a member to a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ requestBody:
+ description: User to add as member
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Member added to scraper targets
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMember"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}/members/{userID}':
+ delete:
+ operationId: DeleteScrapersIDMembersID
+ tags:
+ - Users
+ - ScraperTargets
+ summary: Remove a member from a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of member to remove.
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ responses:
+ '204':
+ description: Member removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}/owners':
+ get:
+ operationId: GetScrapersIDOwners
+ tags:
+ - Users
+ - ScraperTargets
+ summary: List all owners of a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ responses:
+ '200':
+ description: A list of scraper target owners
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwners"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostScrapersIDOwners
+ tags:
+ - Users
+ - ScraperTargets
+ summary: Add an owner to a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ requestBody:
+ description: User to add as owner
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Scraper target owner added
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwner"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/scrapers/{scraperTargetID}/owners/{userID}':
+ delete:
+ operationId: DeleteScrapersIDOwnersID
+ tags:
+ - Users
+ - ScraperTargets
+ summary: Remove an owner from a scraper target
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of owner to remove.
+ - in: path
+ name: scraperTargetID
+ schema:
+ type: string
+ required: true
+ description: The scraper target ID.
+ responses:
+ '204':
+ description: Owner removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /variables:
+ get:
+ operationId: GetVariables
+ tags:
+ - Variables
+ summary: Get all variables
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: org
+ description: The organization name.
+ schema:
+ type: string
+ - in: query
+ name: orgID
+ description: The organization ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: All variables for an organization
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variables"
+ '400':
+ description: Invalid request
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostVariables
+ summary: Create a variable
+ tags:
+ - Variables
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Variable to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ responses:
+ '201':
+ description: Variable created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/variables/{variableID}':
+ get:
+ operationId: GetVariablesID
+ tags:
+ - Variables
+ summary: Get a variable
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ required: true
+ schema:
+ type: string
+ description: The variable ID.
+ responses:
+ '200':
+ description: Variable found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ '404':
+ description: Variable not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteVariablesID
+ tags:
+ - Variables
+ summary: Delete a variable
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ required: true
+ schema:
+ type: string
+ description: The variable ID.
+ responses:
+ '204':
+ description: Variable deleted
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchVariablesID
+ summary: Update a variable
+ tags:
+ - Variables
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ required: true
+ schema:
+ type: string
+ description: The variable ID.
+ requestBody:
+ description: Variable update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ responses:
+ '200':
+ description: Variable updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ put:
+ operationId: PutVariablesID
+ summary: Replace a variable
+ tags:
+ - Variables
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ required: true
+ schema:
+ type: string
+ description: The variable ID.
+ requestBody:
+ description: Variable to replace
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ responses:
+ '200':
+ description: Variable updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Variable"
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/variables/{variableID}/labels':
+ get:
+ operationId: GetVariablesIDLabels
+ tags:
+ - Variables
+ summary: List all labels for a variable
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ schema:
+ type: string
+ required: true
+ description: The variable ID.
+ responses:
+ '200':
+ description: A list of all labels for a variable
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostVariablesIDLabels
+ tags:
+ - Variables
+ summary: Add a label to a variable
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ schema:
+ type: string
+ required: true
+ description: The variable ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The newly added label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/variables/{variableID}/labels/{labelID}':
+ delete:
+ operationId: DeleteVariablesIDLabelsID
+ tags:
+ - Variables
+ summary: Delete a label from a variable
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: variableID
+ schema:
+ type: string
+ required: true
+ description: The variable ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Variable not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /write:
+ post:
+ operationId: PostWrite
+ tags:
+ - Write
+ summary: Write time series data into InfluxDB
+ requestBody:
+ description: Line protocol body
+ required: true
+ content:
+ text/plain:
+ schema:
+ type: string
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: header
+ name: Content-Encoding
+ description: When present, its value indicates to the database that compression is applied to the line-protocol body.
+ schema:
+ type: string
+ description: Specifies that the line protocol in the body is encoded with gzip or not encoded with identity.
+ default: identity
+ enum:
+ - gzip
+ - identity
+ - in: header
+ name: Content-Type
+ description: Content-Type is used to indicate the format of the data sent to the server.
+ schema:
+ type: string
+ description: Text/plain specifies the text line protocol; charset is assumed to be utf-8.
+ default: text/plain; charset=utf-8
+ enum:
+ - text/plain
+ - text/plain; charset=utf-8
+ - application/vnd.influx.arrow
+ - in: header
+ name: Content-Length
+        description: Content-Length is an entity header indicating the size of the entity-body, in bytes, sent to the database. If the length is greater than the database's max body configuration option, a 413 response is sent.
+ schema:
+ type: integer
+ description: The length in decimal number of octets.
+ - in: header
+ name: Accept
+ description: Specifies the return content format.
+ schema:
+ type: string
+ description: The return format for errors.
+ default: application/json
+ enum:
+ - application/json
+ - in: query
+ name: org
+ description: Specifies the destination organization for writes. Takes either the ID or Name interchangeably. If both `orgID` and `org` are specified, `org` takes precedence.
+ required: true
+ schema:
+ type: string
+ description: All points within batch are written to this organization.
+ - in: query
+ name: orgID
+ description: Specifies the ID of the destination organization for writes. If both `orgID` and `org` are specified, `org` takes precedence.
+ schema:
+ type: string
+ - in: query
+ name: bucket
+ description: The destination bucket for writes.
+ required: true
+ schema:
+ type: string
+ description: All points within batch are written to this bucket.
+ - in: query
+ name: precision
+ description: The precision for the unix timestamps within the body line-protocol.
+ schema:
+ $ref: "#/components/schemas/WritePrecision"
+ responses:
+ '204':
+ description: Write data is correctly formatted and accepted for writing to the bucket.
+ '400':
+ description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LineProtocolError"
+ '401':
+ description: Token does not have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '403':
+ description: No token was sent and they are required.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '413':
+ description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LineProtocolLengthError"
+ '429':
+ description: Token is temporarily over quota. The Retry-After header describes when to try the write again.
+ headers:
+ Retry-After:
+ description: A non-negative decimal integer indicating the seconds to delay after the response is received.
+ schema:
+ type: integer
+ format: int32
+ '503':
+ description: Server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again.
+ headers:
+ Retry-After:
+ description: A non-negative decimal integer indicating the seconds to delay after the response is received.
+ schema:
+ type: integer
+ format: int32
+ default:
+ description: Internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /ready:
+ servers:
+ - url: /
+ get:
+ operationId: GetReady
+ tags:
+ - Ready
+ summary: Get the readiness of an instance at startup
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '200':
+ description: The instance is ready
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Ready"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /health:
+ servers:
+ - url: /
+ get:
+ operationId: GetHealth
+ tags:
+ - Health
+ summary: Get the health of an instance
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '200':
+ description: The instance is healthy
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/HealthCheck"
+ '503':
+ description: The instance is unhealthy
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/HealthCheck"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /sources:
+ post:
+ operationId: PostSources
+ tags:
+ - Sources
+ summary: Creates a source
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Source to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Source"
+ responses:
+ '201':
+ description: Created Source
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Source"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ get:
+ operationId: GetSources
+ tags:
+ - Sources
+ summary: Get all sources
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: org
+ description: The organization name.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: All sources
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Sources"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /sources/{sourceID}:
+ delete:
+ operationId: DeleteSourcesID
+ tags:
+ - Sources
+ summary: Delete a source
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: sourceID
+ schema:
+ type: string
+ required: true
+ description: The source ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: View not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchSourcesID
+ tags:
+ - Sources
+ summary: Update a Source
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: sourceID
+ schema:
+ type: string
+ required: true
+ description: The source ID.
+ requestBody:
+ description: Source update
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Source"
+ responses:
+ '200':
+ description: Created Source
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Source"
+ '404':
+ description: Source not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ get:
+ operationId: GetSourcesID
+ tags:
+ - Sources
+ summary: Get a source
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: sourceID
+ schema:
+ type: string
+ required: true
+ description: The source ID.
+ responses:
+ '200':
+ description: A source
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Source"
+ '404':
+ description: Source not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /sources/{sourceID}/health:
+ get:
+ operationId: GetSourcesIDHealth
+ tags:
+ - Sources
+ summary: Get the health of a source
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: sourceID
+ schema:
+ type: string
+ required: true
+ description: The source ID.
+ responses:
+ '200':
+ description: The source is healthy
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/HealthCheck"
+ '503':
+ description: The source is not healthy
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/HealthCheck"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /sources/{sourceID}/buckets:
+ get:
+ operationId: GetSourcesIDBuckets
+ tags:
+ - Sources
+ - Buckets
+ summary: Get buckets in a source
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: sourceID
+ schema:
+ type: string
+ required: true
+ description: The source ID.
+ - in: query
+ name: org
+ description: The organization name.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: A list of buckets for the source
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Buckets"
+ '404':
+ description: Source not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /labels:
+ post:
+ operationId: PostLabels
+ tags:
+ - Labels
+ summary: Create a label
+ requestBody:
+ description: Label to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelCreateRequest"
+ responses:
+ '201':
+ description: Added label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ get:
+ operationId: GetLabels
+ tags:
+ - Labels
+ summary: Get all labels
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: orgID
+ description: The organization ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: All labels
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /labels/{labelID}:
+ get:
+ operationId: GetLabelsID
+ tags:
+ - Labels
+ summary: Get a label
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to get.
+ responses:
+ '200':
+ description: A label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchLabelsID
+ tags:
+ - Labels
+ summary: Update a label
+ requestBody:
+ description: Label update
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelUpdate"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to update.
+ responses:
+ '200':
+ description: Updated label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ '404':
+ description: Label not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteLabelsID
+ tags:
+ - Labels
+ summary: Delete a label
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Label not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /dashboards:
+ post:
+ operationId: PostDashboards
+ tags:
+ - Dashboards
+ summary: Create a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Dashboard to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/CreateDashboardRequest"
+ responses:
+ '201':
+ description: Added dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Dashboard"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ get:
+ operationId: GetDashboards
+ tags:
+ - Dashboards
+ summary: Get all dashboards
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: owner
+ description: The owner ID.
+ schema:
+ type: string
+ - in: query
+ name: sortBy
+ description: The column to sort by.
+ schema:
+ type: string
+ enum:
+ - "ID"
+ - "CreatedAt"
+ - "UpdatedAt"
+ - in: query
+ name: id
+ description: List of dashboard IDs to return. If both `id` and `owner` are specified, only `id` is used.
+ schema:
+ type: array
+ items:
+ type: string
+ - in: query
+ name: orgID
+ description: The organization ID.
+ schema:
+ type: string
+ - in: query
+ name: org
+ description: The organization name.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: All dashboards
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Dashboards"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}':
+ get:
+ operationId: GetDashboardsID
+ tags:
+ - Dashboards
+ summary: Get a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to get.
+ responses:
+ '200':
+ description: A single dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Dashboard"
+ '404':
+ description: Dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchDashboardsID
+ tags:
+ - Dashboards
+ summary: Update a dashboard
+ requestBody:
+ description: Patching of a dashboard
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Dashboard"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to update.
+ responses:
+ '200':
+ description: Updated dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Dashboard"
+ '404':
+ description: Dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteDashboardsID
+ tags:
+ - Dashboards
+ summary: Delete a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/cells':
+ put:
+ operationId: PutDashboardsIDCells
+ tags:
+ - Cells
+ - Dashboards
+ summary: Replace cells in a dashboard
+ description: Replaces all cells in a dashboard. This is used primarily to update the positional information of all cells.
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Cells"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to update.
+ responses:
+ '200':
+ description: Replaced dashboard cells
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Dashboard"
+ '404':
+ description: Dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostDashboardsIDCells
+ tags:
+ - Cells
+ - Dashboards
+ summary: Create a dashboard cell
+ requestBody:
+ description: Cell that will be added
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/CreateCell"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to update.
+ responses:
+ '201':
+ description: Cell successfully added
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Cell"
+ '404':
+ description: Dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/cells/{cellID}':
+ patch:
+ operationId: PatchDashboardsIDCellsID
+ tags:
+ - Cells
+ - Dashboards
+ summary: Update the non-positional information related to a cell
+ description: Updates the non-positional information related to a cell. Updates to a single cell's positional data could cause grid conflicts.
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/CellUpdate"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to update.
+ - in: path
+ name: cellID
+ schema:
+ type: string
+ required: true
+ description: The ID of the cell to update.
+ responses:
+ '200':
+ description: Updated dashboard cell
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Cell"
+ '404':
+ description: Cell or dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteDashboardsIDCellsID
+ tags:
+ - Cells
+ - Dashboards
+ summary: Delete a dashboard cell
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to delete the cell from.
+ - in: path
+ name: cellID
+ schema:
+ type: string
+ required: true
+ description: The ID of the cell to delete.
+ responses:
+ '204':
+ description: Cell successfully deleted
+ '404':
+ description: Cell or dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/cells/{cellID}/view':
+ get:
+ operationId: GetDashboardsIDCellsIDView
+ tags:
+ - Cells
+ - Dashboards
+ - Views
+ summary: Retrieve the view for a cell
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ - in: path
+ name: cellID
+ schema:
+ type: string
+ required: true
+ description: The cell ID.
+ responses:
+ '200':
+ description: A dashboard cell's view
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/View"
+ '404':
+ description: Cell or dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchDashboardsIDCellsIDView
+ tags:
+ - Cells
+ - Dashboards
+ - Views
+ summary: Update the view for a cell
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/View"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The ID of the dashboard to update.
+ - in: path
+ name: cellID
+ schema:
+ type: string
+ required: true
+ description: The ID of the cell to update.
+ responses:
+ '200':
+ description: Updated cell view
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/View"
+ '404':
+ description: Cell or dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/labels':
+ get:
+ operationId: GetDashboardsIDLabels
+ tags:
+ - Dashboards
+ summary: List all labels for a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ responses:
+ '200':
+ description: A list of all labels for a dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostDashboardsIDLabels
+ tags:
+ - Dashboards
+ summary: Add a label to a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The label added to the dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/labels/{labelID}':
+ delete:
+ operationId: DeleteDashboardsIDLabelsID
+ tags:
+ - Dashboards
+ summary: Delete a label from a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Dashboard not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/members':
+ get:
+ operationId: GetDashboardsIDMembers
+ tags:
+ - Users
+ - Dashboards
+ summary: List all dashboard members
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ responses:
+ '200':
+ description: A list of users who have member privileges for a dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMembers"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostDashboardsIDMembers
+ tags:
+ - Users
+ - Dashboards
+ summary: Add a member to a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ requestBody:
+ description: User to add as member
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Added to dashboard members
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMember"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/members/{userID}':
+ delete:
+ operationId: DeleteDashboardsIDMembersID
+ tags:
+ - Users
+ - Dashboards
+ summary: Remove a member from a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the member to remove.
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ responses:
+ '204':
+ description: Member removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/owners':
+ get:
+ operationId: GetDashboardsIDOwners
+ tags:
+ - Users
+ - Dashboards
+ summary: List all dashboard owners
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ responses:
+ '200':
+ description: A list of users who have owner privileges for a dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwners"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostDashboardsIDOwners
+ tags:
+ - Users
+ - Dashboards
+ summary: Add an owner to a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ requestBody:
+ description: User to add as owner
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Added to dashboard owners
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwner"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/owners/{userID}':
+ delete:
+ operationId: DeleteDashboardsIDOwnersID
+ tags:
+ - Users
+ - Dashboards
+ summary: Remove an owner from a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the owner to remove.
+ - in: path
+ name: dashboardID
+ schema:
+ type: string
+ required: true
+ description: The dashboard ID.
+ responses:
+ '204':
+ description: Owner removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/dashboards/{dashboardID}/logs':
+ get:
+ operationId: GetDashboardsIDLogs
+ tags:
+ - Dashboards
+ - OperationLogs
+ summary: Retrieve operation logs for a dashboard
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: path
+ name: dashboardID
+ required: true
+ description: The dashboard ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Operation logs for the dashboard
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/OperationLogs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /query/ast:
+ post:
+ operationId: PostQueryAst
+ description: Analyzes a Flux query and generates a query specification.
+ tags:
+ - Query
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: header
+ name: Content-Type
+ schema:
+ type: string
+ enum:
+ - application/json
+ requestBody:
+ description: Flux query to analyze and generate an abstract syntax tree (AST) for.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LanguageRequest"
+ responses:
+ '200':
+ description: Abstract syntax tree of the Flux query.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ASTResponse"
+ default:
+ description: Any response other than 200 is an internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /query/suggestions:
+ get:
+ operationId: GetQuerySuggestions
+ tags:
+ - Query
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '200':
+ description: Suggestions for next functions in call chain
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/FluxSuggestions"
+ default:
+ description: Any response other than 200 is an internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/query/suggestions/{name}':
+ get:
+ operationId: GetQuerySuggestionsName
+ tags:
+ - Query
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: name
+ schema:
+ type: string
+ required: true
+ description: The name of the branching suggestion.
+ responses:
+ '200':
+ description: Suggestions for next functions in call chain
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/FluxSuggestion"
+ default:
+ description: Any response other than 200 is an internal server error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /authorizations:
+ get:
+ operationId: GetAuthorizations
+ tags:
+ - Authorizations
+ summary: List all authorizations
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: userID
+ schema:
+ type: string
+ description: Only show authorizations that belong to a user ID.
+ - in: query
+ name: user
+ schema:
+ type: string
+ description: Only show authorizations that belong to a user name.
+ - in: query
+ name: orgID
+ schema:
+ type: string
+ description: Only show authorizations that belong to an organization ID.
+ - in: query
+ name: org
+ schema:
+ type: string
+ description: Only show authorizations that belong to an organization name.
+ responses:
+ '200':
+ description: A list of authorizations
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Authorizations"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostAuthorizations
+ tags:
+ - Authorizations
+ summary: Create an authorization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Authorization to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Authorization"
+ responses:
+ '201':
+ description: Authorization created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Authorization"
+ '400':
+ description: Invalid request
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /authorizations/{authID}:
+ get:
+ operationId: GetAuthorizationsID
+ tags:
+ - Authorizations
+ summary: Retrieve an authorization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: authID
+ schema:
+ type: string
+ required: true
+ description: The ID of the authorization to get.
+ responses:
+ '200':
+ description: Authorization details
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Authorization"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchAuthorizationsID
+ tags:
+ - Authorizations
+ summary: Update an authorization to be active or inactive
+ requestBody:
+ description: Authorization to update
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AuthorizationUpdateRequest"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: authID
+ schema:
+ type: string
+ required: true
+ description: The ID of the authorization to update.
+ responses:
+ '200':
+ description: The active or inactive authorization
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Authorization"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteAuthorizationsID
+ tags:
+ - Authorizations
+ summary: Delete an authorization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: authID
+ schema:
+ type: string
+ required: true
+ description: The ID of the authorization to delete.
+ responses:
+ '204':
+ description: Authorization deleted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /query/analyze:
+ post:
+ operationId: PostQueryAnalyze
+ tags:
+ - Query
+ summary: Analyze an InfluxQL or Flux query
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: header
+ name: Content-Type
+ schema:
+ type: string
+ enum:
+ - application/json
+ requestBody:
+ description: Flux or InfluxQL query to analyze
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Query"
+ responses:
+ '200':
+ description: Query analysis results. The errors list is empty if the query is valid.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AnalyzeQueryResponse"
+ default:
+ description: Internal server error
+ headers:
+ X-Influx-Error:
+ description: Error string describing the problem
+ schema:
+ type: string
+ X-Influx-Reference:
+ description: Reference code unique to the error type
+ schema:
+ type: integer
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /query:
+ post:
+ operationId: PostQuery
+ tags:
+ - Query
+ summary: Query InfluxDB
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: header
+ name: Accept-Encoding
+ description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
+ schema:
+ type: string
+ description: Indicates whether the query response body should be encoded with `gzip` or left unencoded (`identity`).
+ default: identity
+ enum:
+ - gzip
+ - identity
+ - in: header
+ name: Content-Type
+ schema:
+ type: string
+ enum:
+ - application/json
+ - application/vnd.flux
+ - in: query
+ name: org
+ description: Specifies the name of the organization executing the query. Accepts either the organization name or ID. If both `orgID` and `org` are specified, `org` takes precedence.
+ schema:
+ type: string
+ - in: query
+ name: orgID
+ description: Specifies the ID of the organization executing the query. If both `orgID` and `org` are specified, `org` takes precedence.
+ schema:
+ type: string
+ requestBody:
+ description: Flux query or specification to execute
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Query"
+ application/vnd.flux:
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Query results
+ headers:
+ Content-Encoding:
+ description: The Content-Encoding entity header indicates which encodings (usually compression algorithms) were applied to the entity body.
+ schema:
+ type: string
+ description: Indicates whether the response body is encoded with `gzip` or unencoded (`identity`).
+ default: identity
+ enum:
+ - gzip
+ - identity
+ content:
+ text/csv:
+ schema:
+ type: string
+ example: >
+ result,table,_start,_stop,_time,region,host,_value
+ mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+ mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+ mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+ application/vnd.influx.arrow:
+ schema:
+ type: string
+ format: binary
+ '429':
+ description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
+ headers:
+ Retry-After:
+ description: A non-negative decimal integer indicating the seconds to delay after the response is received.
+ schema:
+ type: integer
+ format: int32
+ default:
+ description: Error processing query
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /buckets:
+ get:
+ operationId: GetBuckets
+ tags:
+ - Buckets
+ summary: List all buckets
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: "#/components/parameters/Offset"
+ - $ref: "#/components/parameters/Limit"
+ - in: query
+ name: org
+ description: The organization name.
+ schema:
+ type: string
+ - in: query
+ name: orgID
+ description: The organization ID.
+ schema:
+ type: string
+ - in: query
+ name: name
+ description: Only returns buckets with a specific name.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: A list of buckets
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Buckets"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostBuckets
+ tags:
+ - Buckets
+ summary: Create a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Bucket to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Bucket"
+ responses:
+ '201':
+ description: Bucket created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Bucket"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}':
+ get:
+ operationId: GetBucketsID
+ tags:
+ - Buckets
+ summary: Retrieve a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '200':
+ description: Bucket details
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Bucket"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchBucketsID
+ tags:
+ - Buckets
+ summary: Update a bucket
+ requestBody:
+ description: Bucket update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Bucket"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '200':
+ description: An updated bucket
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Bucket"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteBucketsID
+ tags:
+ - Buckets
+ summary: Delete a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The ID of the bucket to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Bucket not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/labels':
+ get:
+ operationId: GetBucketsIDLabels
+ tags:
+ - Buckets
+ summary: List all labels for a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '200':
+ description: A list of all labels for a bucket
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostBucketsIDLabels
+ tags:
+ - Buckets
+ summary: Add a label to a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The newly added label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/labels/{labelID}':
+ delete:
+ operationId: DeleteBucketsIDLabelsID
+ tags:
+ - Buckets
+ summary: Delete a label from a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Bucket not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/members':
+ get:
+ operationId: GetBucketsIDMembers
+ tags:
+ - Users
+ - Buckets
+ summary: List all users with member privileges for a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '200':
+ description: A list of bucket members
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMembers"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostBucketsIDMembers
+ tags:
+ - Users
+ - Buckets
+ summary: Add a member to a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ requestBody:
+ description: User to add as member
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Member added to bucket
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMember"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/members/{userID}':
+ delete:
+ operationId: DeleteBucketsIDMembersID
+ tags:
+ - Users
+ - Buckets
+ summary: Remove a member from a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the member to remove.
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '204':
+ description: Member removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/owners':
+ get:
+ operationId: GetBucketsIDOwners
+ tags:
+ - Users
+ - Buckets
+ summary: List all owners of a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '200':
+ description: A list of bucket owners
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwners"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostBucketsIDOwners
+ tags:
+ - Users
+ - Buckets
+ summary: Add an owner to a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ requestBody:
+ description: User to add as owner
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Bucket owner added
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwner"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/owners/{userID}':
+ delete:
+ operationId: DeleteBucketsIDOwnersID
+ tags:
+ - Users
+ - Buckets
+ summary: Remove an owner from a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the owner to remove.
+ - in: path
+ name: bucketID
+ schema:
+ type: string
+ required: true
+ description: The bucket ID.
+ responses:
+ '204':
+ description: Owner removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/buckets/{bucketID}/logs':
+ get:
+ operationId: GetBucketsIDLogs
+ tags:
+ - Buckets
+ - OperationLogs
+ summary: Retrieve operation logs for a bucket
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: path
+ name: bucketID
+ required: true
+ description: The bucket ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Operation logs for the bucket
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/OperationLogs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /orgs:
+ get:
+ operationId: GetOrgs
+ tags:
+ - Organizations
+ summary: List all organizations
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: org
+ schema:
+ type: string
+ description: Filter organizations to a specific organization name.
+ - in: query
+ name: orgID
+ schema:
+ type: string
+ description: Filter organizations to a specific organization ID.
+ responses:
+ '200':
+ description: A list of organizations
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Organizations"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostOrgs
+ tags:
+ - Organizations
+ summary: Create an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Organization to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Organization"
+ responses:
+ '201':
+ description: Organization created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Organization"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}':
+ get:
+ operationId: GetOrgsID
+ tags:
+ - Organizations
+ summary: Retrieve an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The ID of the organization to get.
+ responses:
+ '200':
+ description: Organization details
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Organization"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchOrgsID
+ tags:
+ - Organizations
+ summary: Update an organization
+ requestBody:
+ description: Organization update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Organization"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The ID of the organization to update.
+ responses:
+ '200':
+ description: Organization updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Organization"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteOrgsID
+ tags:
+ - Organizations
+ summary: Delete an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The ID of the organization to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Organization not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/labels':
+ get:
+ operationId: GetOrgsIDLabels
+ tags:
+ - Organizations
+ summary: List all labels for an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ responses:
+ '200':
+ description: A list of all labels for an organization
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostOrgsIDLabels
+ tags:
+ - Organizations
+ summary: Add a label to an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: Returns the created label
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/labels/{labelID}':
+ delete:
+ operationId: DeleteOrgsIDLabelsID
+ tags:
+ - Organizations
+ summary: Delete a label from an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Organization not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/secrets':
+ get:
+ operationId: GetOrgsIDSecrets
+ tags:
+ - Secrets
+ - Organizations
+ summary: List all secret keys for an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ responses:
+ '200':
+ description: A list of all secret keys
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/SecretKeysResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchOrgsIDSecrets
+ tags:
+ - Secrets
+ - Organizations
+ summary: Update secrets in an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ requestBody:
+ description: Secret key-value pairs to add or update
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Secrets"
+ responses:
+ '204':
+ description: Keys successfully patched
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/secrets/delete': # Uses POST because OpenAPI (Swagger) does not allow a request body on DELETE
+ post:
+ operationId: PostOrgsIDSecrets
+ tags:
+ - Secrets
+ - Organizations
+ summary: Delete secrets from an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ requestBody:
+ description: Secret keys to delete
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/SecretKeys"
+ responses:
+ '204':
+ description: Keys successfully deleted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/members':
+ get:
+ operationId: GetOrgsIDMembers
+ tags:
+ - Users
+ - Organizations
+ summary: List all members of an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ responses:
+ '200':
+ description: A list of organization members
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMembers"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostOrgsIDMembers
+ tags:
+ - Users
+ - Organizations
+ summary: Add a member to an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ requestBody:
+ description: User to add as member
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Member added to organization
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMember"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/members/{userID}':
+ delete:
+ operationId: DeleteOrgsIDMembersID
+ tags:
+ - Users
+ - Organizations
+ summary: Remove a member from an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the member to remove.
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ responses:
+ '204':
+ description: Member removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/owners':
+ get:
+ operationId: GetOrgsIDOwners
+ tags:
+ - Users
+ - Organizations
+ summary: List all owners of an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ responses:
+ '200':
+ description: A list of organization owners
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwners"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostOrgsIDOwners
+ tags:
+ - Users
+ - Organizations
+ summary: Add an owner to an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ requestBody:
+ description: User to add as owner
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Organization owner added
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwner"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/owners/{userID}':
+ delete:
+ operationId: DeleteOrgsIDOwnersID
+ tags:
+ - Users
+ - Organizations
+ summary: Remove an owner from an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the owner to remove.
+ - in: path
+ name: orgID
+ schema:
+ type: string
+ required: true
+ description: The organization ID.
+ responses:
+ '204':
+ description: Owner removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/orgs/{orgID}/logs':
+ get:
+ operationId: GetOrgsIDLogs
+ tags:
+ - Organizations
+ - OperationLogs
+ summary: Retrieve operation logs for an organization
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: path
+ name: orgID
+ required: true
+ description: The organization ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Operation logs for the organization
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/OperationLogs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /tasks:
+ get:
+ operationId: GetTasks
+ tags:
+ - Tasks
+ summary: List all tasks
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: query
+ name: name
+ description: Returns only tasks with the specified name.
+ schema:
+ type: string
+ - in: query
+ name: after
+ schema:
+ type: string
+ description: Return tasks after a specified ID.
+ - in: query
+ name: user
+ schema:
+ type: string
+ description: Filter tasks to a specific user ID.
+ - in: query
+ name: org
+ schema:
+ type: string
+ description: Filter tasks to a specific organization name.
+ - in: query
+ name: orgID
+ schema:
+ type: string
+ description: Filter tasks to a specific organization ID.
+ - in: query
+ name: limit
+ schema:
+ type: integer
+ minimum: 1
+ maximum: 500
+ default: 100
+ description: The number of tasks to return
+ responses:
+ '200':
+ description: A list of tasks
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Tasks"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTasks
+ tags:
+ - Tasks
+ summary: Create a new task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: Task to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/TaskCreateRequest"
+ responses:
+ '201':
+ description: Task created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Task"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}':
+ get:
+ operationId: GetTasksID
+ tags:
+ - Tasks
+ summary: Retrieve a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '200':
+ description: Task details
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Task"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchTasksID
+ tags:
+ - Tasks
+ summary: Update a task
+ description: Update a task. This will cancel all queued runs.
+ requestBody:
+ description: Task update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/TaskUpdateRequest"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '200':
+ description: Task updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Task"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteTasksID
+ tags:
+ - Tasks
+ summary: Delete a task
+ description: Deletes a task and all associated records.
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The ID of the task to delete.
+ responses:
+ '204':
+ description: Task deleted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/runs':
+ get:
+ operationId: GetTasksIDRuns
+ tags:
+ - Tasks
+ summary: List runs for a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The ID of the task to get runs for.
+ - in: query
+ name: after
+ schema:
+ type: string
+ description: Returns runs after a specific ID.
+ - in: query
+ name: limit
+ schema:
+ type: integer
+ minimum: 1
+ maximum: 500
+ default: 100
+ description: The number of runs to return
+ - in: query
+ name: afterTime
+ schema:
+ type: string
+ format: date-time
+ description: Filter runs to those scheduled after this time (RFC3339).
+ - in: query
+ name: beforeTime
+ schema:
+ type: string
+ format: date-time
+ description: Filter runs to those scheduled before this time (RFC3339).
+ responses:
+ '200':
+ description: A list of task runs
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Runs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTasksIDRuns
+ tags:
+ - Tasks
+ summary: Manually start a task run, overriding the current schedule
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/RunManually"
+ responses:
+ '201':
+ description: Run scheduled to start
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Run"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/runs/{runID}':
+ get:
+ operationId: GetTasksIDRunsID
+ tags:
+ - Tasks
+ summary: Retrieve a single run for a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ - in: path
+ name: runID
+ schema:
+ type: string
+ required: true
+ description: The run ID.
+ responses:
+ '200':
+ description: The run record
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Run"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteTasksIDRunsID
+ tags:
+ - Tasks
+ summary: Cancel a running task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ - in: path
+ name: runID
+ schema:
+ type: string
+ required: true
+ description: The run ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/runs/{runID}/retry':
+ post:
+ operationId: PostTasksIDRunsIDRetry
+ tags:
+ - Tasks
+ summary: Retry a task run
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ - in: path
+ name: runID
+ schema:
+ type: string
+ required: true
+ description: The run ID.
+ responses:
+ '200':
+ description: Run that has been queued
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Run"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/logs':
+ get:
+ operationId: GetTasksIDLogs
+ tags:
+ - Tasks
+ summary: Retrieve all logs for a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '200':
+ description: All logs for a task
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Logs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/runs/{runID}/logs':
+ get:
+ operationId: GetTasksIDRunsIDLogs
+ tags:
+ - Tasks
+ summary: Retrieve all logs for a run
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: ID of task to get logs for.
+ - in: path
+ name: runID
+ schema:
+ type: string
+ required: true
+ description: ID of run to get logs for.
+ responses:
+ '200':
+ description: All logs for a run
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Logs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/labels':
+ get:
+ operationId: GetTasksIDLabels
+ tags:
+ - Tasks
+ summary: List all labels for a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '200':
+ description: A list of all labels for a task
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTasksIDLabels
+ tags:
+ - Tasks
+ summary: Add a label to a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '200':
+ description: A list of all labels for a task
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/labels/{labelID}':
+ delete:
+ operationId: DeleteTasksIDLabelsID
+ tags:
+ - Tasks
+ summary: Delete a label from a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The label ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Task not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /me:
+ get:
+ operationId: GetMe
+ tags:
+ - Users
+ summary: Return the current authenticated user
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '200':
+ description: Currently authenticated user
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /me/password:
+ put:
+ operationId: PutMePassword
+ tags:
+ - Users
+ summary: Update a password
+ security:
+ - BasicAuth: []
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: New password
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/PasswordResetBody"
+ responses:
+ '204':
+ description: Password successfully updated
+ default:
+ description: Unsuccessful authentication
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/members':
+ get:
+ operationId: GetTasksIDMembers
+ tags:
+ - Users
+ - Tasks
+ summary: List all task members
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '200':
+ description: A list of users who have member privileges for a task
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMembers"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTasksIDMembers
+ tags:
+ - Users
+ - Tasks
+ summary: Add a member to a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ requestBody:
+ description: User to add as member
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Added to task members
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceMember"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/members/{userID}':
+ delete:
+ operationId: DeleteTasksIDMembersID
+ tags:
+ - Users
+ - Tasks
+ summary: Remove a member from a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the member to remove.
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '204':
+ description: Member removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/owners':
+ get:
+ operationId: GetTasksIDOwners
+ tags:
+ - Users
+ - Tasks
+ summary: List all owners of a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '200':
+ description: A list of users who have owner privileges for a task
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwners"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostTasksIDOwners
+ tags:
+ - Users
+ - Tasks
+ summary: Add an owner to a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ requestBody:
+ description: User to add as owner
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/AddResourceMemberRequestBody"
+ responses:
+ '201':
+ description: Added to task owners
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ResourceOwner"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/tasks/{taskID}/owners/{userID}':
+ delete:
+ operationId: DeleteTasksIDOwnersID
+ tags:
+ - Users
+ - Tasks
+ summary: Remove an owner from a task
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the owner to remove.
+ - in: path
+ name: taskID
+ schema:
+ type: string
+ required: true
+ description: The task ID.
+ responses:
+ '204':
+ description: Owner removed
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /users:
+ get:
+ operationId: GetUsers
+ tags:
+ - Users
+ summary: List all users
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ responses:
+ '200':
+ description: A list of users
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Users"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostUsers
+ tags:
+ - Users
+ summary: Create a user
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ requestBody:
+ description: User to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ responses:
+ '201':
+ description: User created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/users/{userID}':
+ get:
+ operationId: GetUsersID
+ tags:
+ - Users
+ summary: Retrieve a user
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The user ID.
+ responses:
+ '200':
+ description: User details
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchUsersID
+ tags:
+ - Users
+ summary: Update a user
+ requestBody:
+ description: User update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the user to update.
+ responses:
+ '200':
+ description: User updated
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteUsersID
+ tags:
+ - Users
+ summary: Delete a user
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The ID of the user to delete.
+ responses:
+ '204':
+ description: User deleted
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/users/{userID}/password':
+ put:
+ operationId: PutUsersIDPassword
+ tags:
+ - Users
+ summary: Update a password
+ security:
+ - BasicAuth: []
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: userID
+ schema:
+ type: string
+ required: true
+ description: The user ID.
+ requestBody:
+ description: New password
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/PasswordResetBody"
+ responses:
+ '204':
+ description: Password successfully updated
+ default:
+ description: Unsuccessful authentication
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/users/{userID}/logs':
+ get:
+ operationId: GetUsersIDLogs
+ tags:
+ - Users
+ - OperationLogs
+ summary: Retrieve operation logs for a user
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: path
+ name: userID
+ required: true
+ description: The user ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Operation logs for the user
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/OperationLogs"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /checks:
+ get:
+ operationId: GetChecks
+ tags:
+ - Checks
+ summary: Get all checks
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: query
+ name: orgID
+ required: true
+ description: Only show checks that belong to a specific organization ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: A list of checks
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Checks"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: CreateCheck
+ tags:
+ - Checks
+ summary: Add new check
+ requestBody:
+ description: Check to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Check"
+ responses:
+ '201':
+ description: Check created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Check"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/checks/{checkID}':
+ get:
+ operationId: GetChecksID
+ tags:
+ - Checks
+ summary: Get a check
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ responses:
+ '200':
+ description: The check requested
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Check"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ put:
+ operationId: PutChecksID
+ tags:
+ - Checks
+ summary: Update a check
+ requestBody:
+ description: Check update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Check"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ responses:
+ '200':
+ description: An updated check
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Check"
+ '404':
+ description: The check was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchChecksID
+ tags:
+ - Checks
+ summary: Update a check
+ requestBody:
+ description: Check update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/CheckPatch"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ responses:
+ '200':
+ description: An updated check
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Check"
+ '404':
+ description: The check was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteChecksID
+ tags:
+ - Checks
+ summary: Delete a check
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: The check was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/checks/{checkID}/labels':
+ get:
+ operationId: GetChecksIDLabels
+ tags:
+ - Checks
+ summary: List all labels for a check
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ responses:
+ '200':
+ description: A list of all labels for a check
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostChecksIDLabels
+ tags:
+ - Checks
+ summary: Add a label to a check
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The label was added to the check
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/checks/{checkID}/labels/{labelID}':
+ delete:
+ operationId: DeleteChecksIDLabelsID
+ tags:
+ - Checks
+ summary: Delete label from a check
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Check or label not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /notificationRules:
+ get:
+ operationId: GetNotificationRules
+ tags:
+ - NotificationRules
+ summary: Get all notification rules
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: query
+ name: orgID
+ required: true
+ description: Only show notification rules that belong to a specific organization ID.
+ schema:
+ type: string
+ - in: query
+ name: checkID
+        description: Only show notification rules that belong to a specific check ID.
+ schema:
+ type: string
+ - in: query
+ name: tag
+ description: Only show notification rules that match a tag pair. Uses `AND` to specify multiple tags.
+ schema:
+ type: string
+ pattern: ^[a-zA-Z0-9_]+:[a-zA-Z0-9_]+$
+ example: env:prod
+ responses:
+ '200':
+ description: A list of notification rules
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRules"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
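+    # Illustrative sketch (not part of the spec): listing notification rules filtered
+    # by organization and by the optional tag pair parameter described above.
+    # The orgID value is a placeholder.
+    #   GET /api/v2/notificationRules?orgID=ORG_ID&tag=env:prod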
+ post:
+ operationId: CreateNotificationRule
+ tags:
+ - NotificationRules
+ summary: Add a notification rule
+ requestBody:
+ description: Notification rule to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRule"
+ responses:
+ '201':
+ description: Notification rule created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRule"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/checks/{checkID}/query':
+ get:
+ operationId: GetChecksIDQuery
+ tags:
+ - Checks
+ summary: Get a check query
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: checkID
+ schema:
+ type: string
+ required: true
+ description: The check ID.
+ responses:
+ '200':
+ description: The check query requested
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/FluxResponse"
+ '400':
+ description: Invalid request
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '404':
+ description: Check not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationRules/{ruleID}':
+ get:
+ operationId: GetNotificationRulesID
+ tags:
+ - NotificationRules
+ summary: Get a notification rule
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ responses:
+ '200':
+ description: The notification rule requested
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRule"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ put:
+ operationId: PutNotificationRulesID
+ tags:
+ - NotificationRules
+ summary: Update a notification rule
+ requestBody:
+ description: Notification rule update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRule"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ responses:
+ '200':
+ description: An updated notification rule
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRule"
+ '404':
+ description: The notification rule was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchNotificationRulesID
+ tags:
+ - NotificationRules
+ summary: Update a notification rule
+ requestBody:
+ description: Notification rule update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRuleUpdate"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ responses:
+ '200':
+ description: An updated notification rule
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationRule"
+ '404':
+ description: The notification rule was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteNotificationRulesID
+ tags:
+ - NotificationRules
+ summary: Delete a notification rule
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+          description: The notification rule was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationRules/{ruleID}/labels':
+ get:
+ operationId: GetNotificationRulesIDLabels
+ tags:
+ - NotificationRules
+ summary: List all labels for a notification rule
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ responses:
+ '200':
+ description: A list of all labels for a notification rule
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostNotificationRuleIDLabels
+ tags:
+ - NotificationRules
+ summary: Add a label to a notification rule
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The label was added to the notification rule
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationRules/{ruleID}/labels/{labelID}':
+ delete:
+ operationId: DeleteNotificationRulesIDLabelsID
+ tags:
+ - NotificationRules
+ summary: Delete label from a notification rule
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Rule or label not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationRules/{ruleID}/query':
+ get:
+ operationId: GetNotificationRulesIDQuery
+ tags:
+ - Rules
+ summary: Get a notification rule query
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: ruleID
+ schema:
+ type: string
+ required: true
+ description: The notification rule ID.
+ responses:
+ '200':
+ description: The notification rule query requested
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/FluxResponse"
+ '400':
+ description: Invalid request
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '404':
+ description: Notification rule not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ /notificationEndpoints:
+ get:
+ operationId: GetNotificationEndpoints
+ tags:
+ - NotificationEndpoints
+ summary: Get all notification endpoints
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - $ref: '#/components/parameters/Offset'
+ - $ref: '#/components/parameters/Limit'
+ - in: query
+ name: orgID
+ required: true
+        description: Only show notification endpoints that belong to a specific organization ID.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: A list of notification endpoints
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoints"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: CreateNotificationEndpoint
+ tags:
+ - NotificationEndpoints
+ summary: Add a notification endpoint
+ requestBody:
+ description: Notification endpoint to create
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ responses:
+ '201':
+ description: Notification endpoint created
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationEndpoints/{endpointID}':
+ get:
+ operationId: GetNotificationEndpointsID
+ tags:
+ - NotificationEndpoints
+ summary: Get a notification endpoint
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ responses:
+ '200':
+ description: The notification endpoint requested
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ put:
+ operationId: PutNotificationEndpointsID
+ tags:
+ - NotificationEndpoints
+ summary: Update a notification endpoint
+ requestBody:
+ description: A new notification endpoint to replace the existing endpoint with
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ responses:
+ '200':
+ description: An updated notification endpoint
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ '404':
+ description: The notification endpoint was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ patch:
+ operationId: PatchNotificationEndpointsID
+ tags:
+ - NotificationEndpoints
+ summary: Update a notification endpoint
+ requestBody:
+        description: Notification endpoint update to apply
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpointUpdate"
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ responses:
+ '200':
+ description: An updated notification endpoint
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ '404':
+ description: The notification endpoint was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ delete:
+ operationId: DeleteNotificationEndpointsID
+ tags:
+ - NotificationEndpoints
+ summary: Delete a notification endpoint
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: The endpoint was not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationEndpoints/{endpointID}/labels':
+ get:
+ operationId: GetNotificationEndpointsIDLabels
+ tags:
+ - NotificationEndpoints
+ summary: List all labels for a notification endpoint
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ responses:
+ '200':
+ description: A list of all labels for a notification endpoint
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelsResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ post:
+ operationId: PostNotificationEndpointIDLabels
+ tags:
+ - NotificationEndpoints
+ summary: Add a label to a notification endpoint
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ requestBody:
+ description: Label to add
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelMapping"
+ responses:
+ '201':
+ description: The label was added to the notification endpoint
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/LabelResponse"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ '/notificationEndpoints/{endpointID}/labels/{labelID}':
+ delete:
+ operationId: DeleteNotificationEndpointsIDLabelsID
+ tags:
+ - NotificationEndpoints
+ summary: Delete a label from a notification endpoint
+ parameters:
+ - $ref: '#/components/parameters/TraceSpan'
+ - in: path
+ name: endpointID
+ schema:
+ type: string
+ required: true
+ description: The notification endpoint ID.
+ - in: path
+ name: labelID
+ schema:
+ type: string
+ required: true
+ description: The ID of the label to delete.
+ responses:
+ '204':
+ description: Delete has been accepted
+ '404':
+ description: Endpoint or label not found
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ default:
+ description: Unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+components:
+ parameters:
+ Offset:
+ in: query
+ name: offset
+ required: false
+ schema:
+ type: integer
+ minimum: 0
+ Limit:
+ in: query
+ name: limit
+ required: false
+ schema:
+ type: integer
+ minimum: 1
+ maximum: 100
+ default: 20
+ Descending:
+ in: query
+ name: descending
+ required: false
+ schema:
+ type: boolean
+ default: false
+ SortBy:
+ in: query
+ name: sortBy
+ required: false
+ schema:
+ type: string
+ TraceSpan:
+ in: header
+ name: Zap-Trace-Span
+ description: OpenTracing span context
+ example:
+ trace_id: '1'
+ span_id: '1'
+ baggage:
+ key: value
+ required: false
+ schema:
+ type: string
+ schemas:
+ LanguageRequest:
+ description: Flux query to be analyzed.
+ type: object
+ required:
+ - query
+ properties:
+ query:
+ description: Flux query script to be analyzed
+ type: string
+ Query:
+ description: Query influx with specific return formatting.
+ type: object
+ required:
+ - query
+ properties:
+ extern:
+ $ref: "#/components/schemas/File"
+ query:
+ description: Query script to execute.
+ type: string
+ type:
+ description: The type of query.
+ type: string
+ default: flux
+ enum:
+ - flux
+ - influxql
+ db:
+ description: Required for `influxql` type queries.
+ type: string
+ rp:
+ description: Required for `influxql` type queries.
+ type: string
+ cluster:
+ description: Required for `influxql` type queries.
+ type: string
+ dialect:
+ $ref: "#/components/schemas/Dialect"
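+    # Illustrative sketch (not part of the spec): a minimal Query request body.
+    # The bucket name and range in the Flux script are placeholders.
+    #   {"query": "from(bucket: \"example-bucket\") |> range(start: -1h)", "type": "flux"}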
+ Package:
+ description: Represents a complete package source tree.
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ path:
+ description: Package import path
+ type: string
+ package:
+ description: Package name
+ type: string
+ files:
+ description: Package files
+ type: array
+ items:
+ $ref: "#/components/schemas/File"
+ File:
+ description: Represents a source from a single file
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ name:
+ description: The name of the file.
+ type: string
+ package:
+ $ref: "#/components/schemas/PackageClause"
+ imports:
+ description: A list of package imports
+ type: array
+ items:
+ $ref: "#/components/schemas/ImportDeclaration"
+ body:
+ description: List of Flux statements
+ type: array
+ items:
+ $ref: "#/components/schemas/Statement"
+ PackageClause:
+ description: Defines a package identifier
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ name:
+ $ref: "#/components/schemas/Identifier"
+ ImportDeclaration:
+ description: Declares a package import
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ as:
+ $ref: "#/components/schemas/Identifier"
+ path:
+ $ref: "#/components/schemas/StringLiteral"
+ Node:
+ oneOf:
+ - $ref: "#/components/schemas/Expression"
+ - $ref: "#/components/schemas/Block"
+ Block:
+ description: A set of statements
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ body:
+ description: Block body
+ type: array
+ items:
+ $ref: "#/components/schemas/Statement"
+ Statement:
+ oneOf:
+ - $ref: "#/components/schemas/BadStatement"
+ - $ref: "#/components/schemas/VariableAssignment"
+ - $ref: "#/components/schemas/MemberAssignment"
+ - $ref: "#/components/schemas/ExpressionStatement"
+ - $ref: "#/components/schemas/ReturnStatement"
+ - $ref: "#/components/schemas/OptionStatement"
+ - $ref: "#/components/schemas/BuiltinStatement"
+ - $ref: "#/components/schemas/TestStatement"
+ BadStatement:
+ description: A placeholder for statements for which no correct statement nodes can be created
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ text:
+ description: Raw source text
+ type: string
+ VariableAssignment:
+ description: Represents the declaration of a variable
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ id:
+ $ref: "#/components/schemas/Identifier"
+ init:
+ $ref: "#/components/schemas/Expression"
+ MemberAssignment:
+ description: Object property assignment
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ member:
+ $ref: "#/components/schemas/MemberExpression"
+ init:
+ $ref: "#/components/schemas/Expression"
+ ExpressionStatement:
+ description: May consist of an expression that does not return a value and is executed solely for its side-effects
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ expression:
+ $ref: "#/components/schemas/Expression"
+ ReturnStatement:
+ description: Defines an expression to return
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ argument:
+ $ref: "#/components/schemas/Expression"
+ OptionStatement:
+ description: A single variable declaration
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ assignment:
+ oneOf:
+ - $ref: "#/components/schemas/VariableAssignment"
+ - $ref: "#/components/schemas/MemberAssignment"
+ BuiltinStatement:
+ description: Declares a builtin identifier and its type
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ id:
+ $ref: "#/components/schemas/Identifier"
+ TestStatement:
+ description: Declares a Flux test case
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ assignment:
+ $ref: "#/components/schemas/VariableAssignment"
+ Expression:
+ oneOf:
+ - $ref: "#/components/schemas/ArrayExpression"
+ - $ref: "#/components/schemas/FunctionExpression"
+ - $ref: "#/components/schemas/BinaryExpression"
+ - $ref: "#/components/schemas/CallExpression"
+ - $ref: "#/components/schemas/ConditionalExpression"
+ - $ref: "#/components/schemas/LogicalExpression"
+ - $ref: "#/components/schemas/MemberExpression"
+ - $ref: "#/components/schemas/IndexExpression"
+ - $ref: "#/components/schemas/ObjectExpression"
+ - $ref: "#/components/schemas/ParenExpression"
+ - $ref: "#/components/schemas/PipeExpression"
+ - $ref: "#/components/schemas/UnaryExpression"
+ - $ref: "#/components/schemas/BooleanLiteral"
+ - $ref: "#/components/schemas/DateTimeLiteral"
+ - $ref: "#/components/schemas/DurationLiteral"
+ - $ref: "#/components/schemas/FloatLiteral"
+ - $ref: "#/components/schemas/IntegerLiteral"
+ - $ref: "#/components/schemas/PipeLiteral"
+ - $ref: "#/components/schemas/RegexpLiteral"
+ - $ref: "#/components/schemas/StringLiteral"
+ - $ref: "#/components/schemas/UnsignedIntegerLiteral"
+ - $ref: "#/components/schemas/Identifier"
+ ArrayExpression:
+ description: Used to create and directly specify the elements of an array object
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ elements:
+ description: Elements of the array
+ type: array
+ items:
+ $ref: "#/components/schemas/Expression"
+ FunctionExpression:
+ description: Function expression
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ params:
+ description: Function parameters
+ type: array
+ items:
+ $ref: "#/components/schemas/Property"
+ body:
+ $ref: "#/components/schemas/Node"
+ BinaryExpression:
+      description: Uses binary operators to act on two operands in an expression
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ operator:
+ type: string
+ left:
+ $ref: "#/components/schemas/Expression"
+ right:
+ $ref: "#/components/schemas/Expression"
+ CallExpression:
+ description: Represents a function call
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ callee:
+ $ref: "#/components/schemas/Expression"
+ arguments:
+ description: Function arguments
+ type: array
+ items:
+ $ref: "#/components/schemas/Expression"
+ ConditionalExpression:
+ description: Selects one of two expressions, `Alternate` or `Consequent`, depending on a third boolean expression, `Test`
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ test:
+ $ref: "#/components/schemas/Expression"
+ alternate:
+ $ref: "#/components/schemas/Expression"
+ consequent:
+ $ref: "#/components/schemas/Expression"
+ LogicalExpression:
+ description: Represents the rule conditions that collectively evaluate to either true or false
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ operator:
+ type: string
+ left:
+ $ref: "#/components/schemas/Expression"
+ right:
+ $ref: "#/components/schemas/Expression"
+ MemberExpression:
+ description: Represents accessing a property of an object
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ object:
+ $ref: "#/components/schemas/Expression"
+ property:
+ $ref: "#/components/schemas/PropertyKey"
+ IndexExpression:
+ description: Represents indexing into an array
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ array:
+ $ref: "#/components/schemas/Expression"
+ index:
+ $ref: "#/components/schemas/Expression"
+ ObjectExpression:
+ description: Allows the declaration of an anonymous object within a declaration
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ properties:
+ description: Object properties
+ type: array
+ items:
+ $ref: "#/components/schemas/Property"
+ ParenExpression:
+ description: Represents an expression wrapped in parenthesis
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ expression:
+ $ref: "#/components/schemas/Expression"
+ PipeExpression:
+ description: Call expression with pipe argument
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ argument:
+ $ref: "#/components/schemas/Expression"
+ call:
+ $ref: "#/components/schemas/CallExpression"
+ UnaryExpression:
+ description: Uses operators to act on a single operand in an expression
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ operator:
+ type: string
+ argument:
+ $ref: "#/components/schemas/Expression"
+ BooleanLiteral:
+ description: Represents boolean values
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: boolean
+ DateTimeLiteral:
+ description: Represents an instant in time with nanosecond precision using the syntax of golang's RFC3339 Nanosecond variant
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: string
+ DurationLiteral:
+ description: Represents the elapsed time between two instants as an int64 nanosecond count with syntax of golang's time.Duration
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ values:
+ description: Duration values
+ type: array
+ items:
+ $ref: "#/components/schemas/Duration"
+ FloatLiteral:
+      description: Represents floating-point numbers according to the double representation defined by IEEE 754-1985
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: number
+ IntegerLiteral:
+ description: Represents integer numbers
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: string
+ PipeLiteral:
+ description: Represents a specialized literal value, indicating the left hand value of a pipe expression
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ RegexpLiteral:
+ description: Expressions begin and end with `/` and are regular expressions with syntax accepted by RE2
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: string
+ StringLiteral:
+ description: Expressions begin and end with double quote marks
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: string
+ UnsignedIntegerLiteral:
+ description: Represents integer numbers
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ value:
+ type: string
+ Duration:
+      description: A pair consisting of a length of time and the unit of time measured. It is the atomic unit from which all duration literals are composed.
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ magnitude:
+ type: integer
+ unit:
+ type: string
+ Property:
+ description: The value associated with a key
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ key:
+ $ref: "#/components/schemas/PropertyKey"
+ value:
+ $ref: "#/components/schemas/Expression"
+ PropertyKey:
+ oneOf:
+ - $ref: "#/components/schemas/Identifier"
+ - $ref: "#/components/schemas/StringLiteral"
+ Identifier:
+ description: A valid Flux identifier
+ type: object
+ properties:
+ type:
+ $ref: "#/components/schemas/NodeType"
+ name:
+ type: string
+ NodeType:
+ description: Type of AST node
+ type: string
+ Dialect:
+      description: Options to change the default CSV output format; see https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions
+ type: object
+ properties:
+ header:
+ description: If true, the results will contain a header row
+ type: boolean
+ default: true
+ delimiter:
+ description: Separator between cells; the default is ,
+ type: string
+ default: ","
+ maxLength: 1
+ minLength: 1
+ annotations:
+          description: Annotations to include in the results; see https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns
+ type: array
+ items:
+ type: string
+ enum:
+ - "group"
+ - "datatype"
+ - "default"
+ uniqueItems: true
+ commentPrefix:
+ description: Character prefixed to comment strings
+ type: string
+ default: "#"
+ maxLength: 1
+ minLength: 0
+ dateTimeFormat:
+ description: Format of timestamps
+ type: string
+ default: "RFC3339"
+ enum:
+ - RFC3339
+ - RFC3339Nano
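+  # Illustrative sketch (not part of the spec): a Dialect requesting annotated,
+  # comma-delimited CSV with RFC3339 timestamps.
+  #   {"header": true, "delimiter": ",", "annotations": ["group", "datatype", "default"], "dateTimeFormat": "RFC3339"}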
+ Permission:
+ required: [action, resource]
+ properties:
+ action:
+ type: string
+ enum:
+ - read
+ - write
+ resource:
+ type: object
+ required: [type]
+ properties:
+ type:
+ type: string
+ enum:
+ - authorizations
+ - buckets
+ - dashboards
+ - orgs
+ - sources
+ - tasks
+ - telegrafs
+ - users
+ - variables
+ - scrapers
+ - secrets
+ - labels
+ - views
+ - documents
+ - notificationRules
+ - notificationEndpoints
+ - checks
+ id:
+ type: string
+ nullable: true
+          description: If ID is set, that is a permission for a specific resource. If it is not set, it is a permission for all resources of that resource type.
+ name:
+ type: string
+ nullable: true
+ description: Optional name of the resource if the resource has a name field.
+ orgID:
+ type: string
+ nullable: true
+          description: If orgID is set, that is a permission for all resources owned by that org. If it is not set, it is a permission for all resources of that resource type.
+ org:
+ type: string
+ nullable: true
+          description: Optional name of the organization with orgID.
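+  # Illustrative sketch (not part of the spec): a read permission scoped to all
+  # buckets owned by one organization; the orgID value is a placeholder.
+  #   {"action": "read", "resource": {"type": "buckets", "orgID": "ORG_ID"}}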
+ AuthorizationUpdateRequest:
+ properties:
+ status:
+        description: If inactive, the token is inactive and requests using the token will be rejected.
+ default: active
+ type: string
+ enum:
+ - active
+ - inactive
+ description:
+ type: string
+ description: A description of the token.
+ Authorization:
+ required: [orgID, permissions]
+ allOf:
+ - $ref: "#/components/schemas/AuthorizationUpdateRequest"
+ - type: object
+ properties:
+ orgID:
+ type: string
+ description: ID of org that authorization is scoped to.
+ permissions:
+ type: array
+ minLength: 1
+ description: List of permissions for an auth. An auth must have at least one Permission.
+ items:
+ $ref: "#/components/schemas/Permission"
+ id:
+ readOnly: true
+ type: string
+ token:
+ readOnly: true
+ type: string
+            description: Passed via the Authorization header using the Token authentication type.
+ userID:
+ readOnly: true
+ type: string
+ description: ID of user that created and owns the token.
+ user:
+ readOnly: true
+ type: string
+ description: Name of user that created and owns the token.
+ org:
+ readOnly: true
+ type: string
+ description: Name of the org token is scoped to.
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/authorizations/1"
+ user: "/api/v2/users/12"
+ properties:
+ self:
+ readOnly: true
+ $ref: "#/components/schemas/Link"
+ user:
+ readOnly: true
+ $ref: "#/components/schemas/Link"
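+  # Illustrative sketch (not part of the spec): an Authorization creation body with
+  # the two required fields; the orgID value is a placeholder.
+  #   {"orgID": "ORG_ID", "permissions": [{"action": "read", "resource": {"type": "buckets"}}]}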
+ Authorizations:
+ type: object
+ properties:
+ links:
+ readOnly: true
+ $ref: "#/components/schemas/Links"
+ authorizations:
+ type: array
+ items:
+ $ref: "#/components/schemas/Authorization"
+ Bucket:
+ properties:
+ links:
+ type: object
+ readOnly: true
+ example:
+ labels: "/api/v2/buckets/1/labels"
+ logs: "/api/v2/buckets/1/logs"
+ members: "/api/v2/buckets/1/members"
+ org: "/api/v2/orgs/2"
+ owners: "/api/v2/buckets/1/owners"
+ self: "/api/v2/buckets/1"
+ write: "/api/v2/write?org=2&bucket=1"
+ properties:
+ labels:
+ description: URL to retrieve labels for this bucket
+ $ref: "#/components/schemas/Link"
+ logs:
+ description: URL to retrieve operation logs for this bucket
+ $ref: "#/components/schemas/Link"
+ members:
+ description: URL to retrieve members that can read this bucket
+ $ref: "#/components/schemas/Link"
+ org:
+ description: URL to retrieve parent organization for this bucket
+ $ref: "#/components/schemas/Link"
+ owners:
+ description: URL to retrieve owners that can read and write to this bucket.
+ $ref: "#/components/schemas/Link"
+ self:
+ description: URL for this bucket
+ $ref: "#/components/schemas/Link"
+ write:
+ description: URL to write line protocol for this bucket
+ $ref: "#/components/schemas/Link"
+ id:
+ readOnly: true
+ type: string
+ type:
+ readOnly: true
+ type: string
+ default: user
+ enum:
+ - user
+ - system
+ name:
+ type: string
+ description:
+ type: string
+ orgID:
+ type: string
+ rp:
+ type: string
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ retentionRules:
+ type: array
+ description: Rules to expire or retain data. No rules means data never expires.
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ default: expire
+ enum:
+ - expire
+ everySeconds:
+ type: integer
+ description: Duration in seconds for how long data will be kept in the database.
+ example: 86400
+ minimum: 1
+ required: [type, everySeconds]
+ labels:
+ $ref: "#/components/schemas/Labels"
+ required: [name, retentionRules]
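+  # Illustrative sketch (not part of the spec): a minimal Bucket with a single
+  # 24-hour expiration rule, matching the required fields above.
+  #   {"name": "example-bucket", "retentionRules": [{"type": "expire", "everySeconds": 86400}]}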
+ Buckets:
+ type: object
+ properties:
+ links:
+ readOnly: true
+ $ref: "#/components/schemas/Links"
+ buckets:
+ type: array
+ items:
+ $ref: "#/components/schemas/Bucket"
+ Link:
+ type: string
+ format: uri
+ readOnly: true
+ description: URI of resource.
+ Links:
+ type: object
+ properties:
+ next:
+ $ref: "#/components/schemas/Link"
+ self:
+ $ref: "#/components/schemas/Link"
+ prev:
+ $ref: "#/components/schemas/Link"
+ required: [self]
+ Logs:
+ type: object
+ properties:
+ events:
+ readOnly: true
+ type: array
+ items:
+ $ref: "#/components/schemas/LogEvent"
+ LogEvent:
+ type: object
+ properties:
+ time:
+ readOnly: true
+ description: Time event occurred, RFC3339Nano.
+ type: string
+ format: date-time
+ message:
+ readOnly: true
+ description: A description of the event that occurred.
+ type: string
+ example: Halt and catch fire
+ OperationLog:
+ type: object
+ readOnly: true
+ properties:
+ description:
+ type: string
+ description: A description of the event that occurred.
+ example: Bucket Created
+ time:
+ type: string
+ description: Time event occurred, RFC3339Nano.
+ format: date-time
+ userID:
+ type: string
+        description: ID of the user who performed the operation.
+ links:
+ type: object
+ properties:
+ user:
+ $ref: "#/components/schemas/Link"
+ OperationLogs:
+ type: object
+ properties:
+ logs:
+ type: array
+ items:
+ $ref: "#/components/schemas/OperationLog"
+ links:
+ $ref: "#/components/schemas/Links"
+ Organization:
+ properties:
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/orgs/1"
+ members: "/api/v2/orgs/1/members"
+ owners: "/api/v2/orgs/1/owners"
+ labels: "/api/v2/orgs/1/labels"
+ secrets: "/api/v2/orgs/1/secrets"
+ buckets: "/api/v2/buckets?org=myorg"
+ tasks: "/api/v2/tasks?org=myorg"
+ dashboards: "/api/v2/dashboards?org=myorg"
+ logs: "/api/v2/orgs/1/logs"
+ properties:
+ self:
+ $ref: "#/components/schemas/Link"
+ members:
+ $ref: "#/components/schemas/Link"
+ owners:
+ $ref: "#/components/schemas/Link"
+ labels:
+ $ref: "#/components/schemas/Link"
+ secrets:
+ $ref: "#/components/schemas/Link"
+ buckets:
+ $ref: "#/components/schemas/Link"
+ tasks:
+ $ref: "#/components/schemas/Link"
+ dashboards:
+ $ref: "#/components/schemas/Link"
+ logs:
+ $ref: "#/components/schemas/Link"
+ id:
+ readOnly: true
+ type: string
+ name:
+ type: string
+ description:
+ type: string
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ status:
+        description: If inactive, the organization is inactive.
+ default: active
+ type: string
+ enum:
+ - active
+ - inactive
+ required: [name]
+ Organizations:
+ type: object
+ properties:
+ links:
+ $ref: "#/components/schemas/Links"
+ orgs:
+ type: array
+ items:
+ $ref: "#/components/schemas/Organization"
+ Runs:
+ type: object
+ properties:
+ links:
+ readOnly: true
+ $ref: "#/components/schemas/Links"
+ runs:
+ type: array
+ items:
+ $ref: "#/components/schemas/Run"
+ Run:
+ properties:
+ id:
+ readOnly: true
+ type: string
+ taskID:
+ readOnly: true
+ type: string
+ status:
+ readOnly: true
+ type: string
+ enum:
+ - scheduled
+ - started
+ - failed
+ - success
+ - canceled
+ scheduledFor:
+ description: Time used for run's "now" option, RFC3339.
+ type: string
+ format: date-time
+ log:
+ description: An array of logs associated with the run.
+ type: array
+ readOnly: true
+ items:
+ type: object
+ properties:
+ runID:
+ type: string
+ time:
+ type: string
+ message:
+ type: string
+ startedAt:
+ readOnly: true
+ description: Time run started executing, RFC3339Nano.
+ type: string
+ format: date-time
+ finishedAt:
+ readOnly: true
+ description: Time run finished executing, RFC3339Nano.
+ type: string
+ format: date-time
+ requestedAt:
+ readOnly: true
+ description: Time run was manually requested, RFC3339Nano.
+ type: string
+ format: date-time
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/tasks/1/runs/1"
+ task: "/api/v2/tasks/1"
+ retry: "/api/v2/tasks/1/runs/1/retry"
+ logs: "/api/v2/tasks/1/runs/1/logs"
+ properties:
+ self:
+ type: string
+ format: uri
+ task:
+ type: string
+ format: uri
+ logs:
+ type: string
+ format: uri
+ retry:
+ type: string
+ format: uri
+ RunManually:
+ properties:
+ scheduledFor:
+ nullable: true
+ description: Time used for run's "now" option, RFC3339. Default is the server's now time.
+ type: string
+ format: date-time
+ Tasks:
+ type: object
+ properties:
+ links:
+ readOnly: true
+ $ref: "#/components/schemas/Links"
+ tasks:
+ type: array
+ items:
+ $ref: "#/components/schemas/Task"
+ Task:
+ type: object
+ properties:
+ id:
+ readOnly: true
+ type: string
+ type:
+        description: The type of the task. This can be used for filtering tasks on list actions.
+ type: string
+ orgID:
+ description: The ID of the organization that owns this Task.
+ type: string
+ org:
+ description: The name of the organization that owns this Task.
+ type: string
+ name:
+ description: The name of the task.
+ type: string
+ description:
+ description: An optional description of the task.
+ type: string
+ status:
+ $ref: "#/components/schemas/TaskStatusType"
+ labels:
+ $ref: "#/components/schemas/Labels"
+ authorizationID:
+ description: The ID of the authorization used when this task communicates with the query engine.
+ type: string
+ flux:
+ description: The Flux script to run for this task.
+ type: string
+ every:
+ description: A simple task repetition schedule; parsed from Flux.
+ type: string
+ cron:
+ description: A task repetition schedule in the form '* * * * * *'; parsed from Flux.
+ type: string
+ offset:
+        description: Duration to delay after the schedule before executing the task; parsed from Flux. If set to zero, this option is removed and the default of 0 is used.
+ type: string
+ latestCompleted:
+ description: Timestamp of latest scheduled, completed run, RFC3339.
+ type: string
+ format: date-time
+ readOnly: true
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/tasks/1"
+ owners: "/api/v2/tasks/1/owners"
+ members: "/api/v2/tasks/1/members"
+ labels: "/api/v2/tasks/1/labels"
+ runs: "/api/v2/tasks/1/runs"
+ logs: "/api/v2/tasks/1/logs"
+ properties:
+ self:
+ $ref: "#/components/schemas/Link"
+ owners:
+ $ref: "#/components/schemas/Link"
+ members:
+ $ref: "#/components/schemas/Link"
+ runs:
+ $ref: "#/components/schemas/Link"
+ logs:
+ $ref: "#/components/schemas/Link"
+ labels:
+ $ref: "#/components/schemas/Link"
+ required: [id, name, orgID, flux]
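+  # Illustrative sketch (not part of the spec): a Task request body where the
+  # repetition schedule is carried in the Flux script's task option (the `every`
+  # field above is parsed from Flux). All values are placeholders.
+  #   {"orgID": "ORG_ID", "flux": "option task = {name: \"example\", every: 1h}\nfrom(bucket: \"example-bucket\") |> range(start: -task.every)"}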
+ TaskStatusType:
+ type: string
+ enum: [active, inactive]
+ User:
+ properties:
+ id:
+ readOnly: true
+ type: string
+ oauthID:
+ type: string
+ name:
+ type: string
+ status:
+        description: If inactive, the user is inactive.
+ default: active
+ type: string
+ enum:
+ - active
+ - inactive
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/users/1"
+ logs: "/api/v2/users/1/logs"
+ properties:
+ self:
+ type: string
+ format: uri
+ logs:
+ type: string
+ format: uri
+ required: [name]
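+  # Illustrative sketch (not part of the spec): a minimal User body with the single
+  # required field; the name value is a placeholder.
+  #   {"name": "example-user"}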
+ Users:
+ type: object
+ properties:
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ format: uri
+ users:
+ type: array
+ items:
+ $ref: "#/components/schemas/User"
+ ResourceMember:
+ allOf:
+ - $ref: "#/components/schemas/User"
+ - type: object
+ properties:
+ role:
+ type: string
+ default: member
+ enum:
+ - member
+ ResourceMembers:
+ type: object
+ properties:
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ format: uri
+ users:
+ type: array
+ items:
+ $ref: "#/components/schemas/ResourceMember"
+ ResourceOwner:
+ allOf:
+ - $ref: "#/components/schemas/User"
+ - type: object
+ properties:
+ role:
+ type: string
+ default: owner
+ enum:
+ - owner
+ ResourceOwners:
+ type: object
+ properties:
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ format: uri
+ users:
+ type: array
+ items:
+ $ref: "#/components/schemas/ResourceOwner"
+ FluxSuggestions:
+ type: object
+ properties:
+ funcs:
+ type: array
+ items:
+ $ref: "#/components/schemas/FluxSuggestion"
+ FluxSuggestion:
+ type: object
+ properties:
+ name:
+ type: string
+ params:
+ type: object
+ additionalProperties:
+ type: string
+ Routes:
+ properties:
+ authorizations:
+ type: string
+ format: uri
+ buckets:
+ type: string
+ format: uri
+ dashboards:
+ type: string
+ format: uri
+ external:
+ type: object
+ properties:
+ statusFeed:
+ type: string
+ format: uri
+ variables:
+ type: string
+ format: uri
+ me:
+ type: string
+ format: uri
+ orgs:
+ type: string
+ format: uri
+ query:
+ type: object
+ properties:
+ self:
+ type: string
+ format: uri
+ ast:
+ type: string
+ format: uri
+ analyze:
+ type: string
+ format: uri
+ suggestions:
+ type: string
+ format: uri
+ setup:
+ type: string
+ format: uri
+ signin:
+ type: string
+ format: uri
+ signout:
+ type: string
+ format: uri
+ sources:
+ type: string
+ format: uri
+ system:
+ type: object
+ properties:
+ metrics:
+ type: string
+ format: uri
+ debug:
+ type: string
+ format: uri
+ health:
+ type: string
+ format: uri
+ tasks:
+ type: string
+ format: uri
+ telegrafs:
+ type: string
+ format: uri
+ users:
+ type: string
+ format: uri
+ write:
+ type: string
+ format: uri
+ Error:
+ properties:
+ code:
+ description: Code is the machine-readable error code.
+ readOnly: true
+ type: string
+ # This set of enumerations must remain in sync with the constants defined in errors.go
+ enum:
+ - internal error
+ - not found
+ - conflict
+ - invalid
+ - unprocessable entity
+ - empty value
+ - unavailable
+ - forbidden
+ - too many requests
+ - unauthorized
+ - method not allowed
+ message:
+ readOnly: true
+ description: Message is a human-readable message.
+ type: string
+ required: [code, message]
+ LineProtocolError:
+ properties:
+ code:
+ description: Code is the machine-readable error code.
+ readOnly: true
+ type: string
+ enum:
+ - internal error
+ - not found
+ - conflict
+ - invalid
+ - empty value
+ - unavailable
+ message:
+ readOnly: true
+ description: Message is a human-readable message.
+ type: string
+ op:
+ readOnly: true
+ description: Op describes the logical code operation during error. Useful for debugging.
+ type: string
+ err:
+ readOnly: true
+ description: Err is a stack of errors that occurred during processing of the request. Useful for debugging.
+ type: string
+ line:
+ readOnly: true
+ description: First line within the sent body that contains malformed data
+ type: integer
+ format: int32
+ required: [code, message, op, err]
+ LineProtocolLengthError:
+ properties:
+ code:
+ description: Code is the machine-readable error code.
+ readOnly: true
+ type: string
+ enum:
+ - invalid
+ message:
+ readOnly: true
+ description: Message is a human-readable message.
+ type: string
+ maxLength:
+ readOnly: true
+ description: Max length in bytes for a body of line-protocol.
+ type: integer
+ format: int32
+ required: [code, message, maxLength]
+ Field:
+ type: object
+ properties:
+ value:
+ description: >-
+ value is the value of the field. Meaning of the value is implied by
+ the `type` key
+ type: string
+ type:
+ description: >-
+ `type` describes the field type. `func` is a function. `field` is a field reference.
+ type: string
+ enum:
+ - func
+ - field
+ - integer
+ - number
+ - regex
+ - wildcard
+ alias:
+ description: >-
+ Alias overrides the field name in the returned response. Applies only
+ if type is `func`
+ type: string
+ args:
+ description: Args are the arguments to the function
+ type: array
+ items:
+ $ref: '#/components/schemas/Field'
+ BuilderConfig:
+ type: object
+ properties:
+ buckets:
+ type: array
+ items:
+ type: string
+ tags:
+ type: array
+ items:
+ $ref: '#/components/schemas/BuilderTagsType'
+ functions:
+ type: array
+ items:
+ $ref: '#/components/schemas/BuilderFunctionsType'
+ aggregateWindow:
+ type: object
+ properties:
+ period:
+ type: string
+ BuilderTagsType:
+ type: object
+ properties:
+ key:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ BuilderFunctionsType:
+ type: object
+ properties:
+ name:
+ type: string
+ DashboardQuery:
+ type: object
+ properties:
+ text:
+ type: string
+ description: The text of the Flux query.
+ editMode:
+ $ref: '#/components/schemas/QueryEditMode'
+ name:
+ type: string
+ builderConfig:
+ $ref: '#/components/schemas/BuilderConfig'
+ QueryEditMode:
+ type: string
+ enum: ['builder', 'advanced']
+ Axis:
+ type: object
+ description: The description of a particular axis for a visualization.
+ properties:
+ bounds:
+ type: array
+ minItems: 0
+ maxItems: 2
+ description: >-
+ The extents of an axis in the form [lower, upper]. Clients determine
+ whether bounds are to be inclusive or exclusive of their limits
+ items:
+ type: string
+ label:
+ description: Label is a description of this Axis
+ type: string
+ prefix:
+ description: Prefix represents a label prefix for formatting axis values.
+ type: string
+ suffix:
+ description: Suffix represents a label suffix for formatting axis values.
+ type: string
+ base:
+ description: Base represents the radix for formatting axis values.
+ type: string
+ enum: ['', '2', '10']
+ scale:
+ $ref: '#/components/schemas/AxisScale'
+ AxisScale:
+ description: 'Scale is the axis formatting scale. Supported: "log", "linear"'
+ type: string
+ enum: ['log', 'linear']
+ DashboardColor:
+ type: object
+ description: Defines an encoding of data value into color space.
+ required: [id, type, hex, name, value]
+ properties:
+ id:
+ description: The unique ID of the view color.
+ type: string
+ type:
+ description: Type is how the color is used.
+ type: string
+ enum:
+ - min
+ - max
+ - threshold
+ - scale
+ - text
+ - background
+ hex:
+ description: The hex number of the color
+ type: string
+ maxLength: 7
+ minLength: 7
+ name:
+ description: The user-facing name of the hex color.
+ type: string
+ value:
+ description: The data value mapped to this color.
+ type: number
+ format: float
+ RenamableField:
+ description: Describes a field that can be renamed and made visible or invisible.
+ type: object
+ properties:
+ internalName:
+ description: The calculated name of a field.
+ readOnly: true
+ type: string
+ displayName:
+ description: The name that a field is renamed to by the user.
+ type: string
+ visible:
+ description: Indicates whether this field should be visible on the table.
+ type: boolean
+ XYViewProperties:
+ type: object
+ required:
+ - type
+ - geom
+ - queries
+ - shape
+ - axes
+ - colors
+ - legend
+ - note
+ - showNoteWhenEmpty
+ properties:
+ type:
+ type: string
+ enum: [xy]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardColor"
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ axes:
+ $ref: '#/components/schemas/Axes'
+ legend:
+ $ref: '#/components/schemas/Legend'
+ xColumn:
+ type: string
+ yColumn:
+ type: string
+ shadeBelow:
+ type: boolean
+ geom:
+ $ref: '#/components/schemas/XYGeom'
+ XYGeom:
+ type: string
+ enum: [line, step, stacked, bar, monotoneX]
+ LinePlusSingleStatProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - shape
+ - axes
+ - colors
+ - legend
+ - note
+ - showNoteWhenEmpty
+ - prefix
+ - suffix
+ - decimalPlaces
+ properties:
+ type:
+ type: string
+ enum: [line-plus-single-stat]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardColor"
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ axes:
+ $ref: '#/components/schemas/Axes'
+ legend:
+ $ref: '#/components/schemas/Legend'
+ xColumn:
+ type: string
+ yColumn:
+ type: string
+ shadeBelow:
+ type: boolean
+ prefix:
+ type: string
+ suffix:
+ type: string
+ decimalPlaces:
+ $ref: '#/components/schemas/DecimalPlaces'
+ ScatterViewProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - colors
+ - shape
+ - note
+ - showNoteWhenEmpty
+ - xColumn
+ - yColumn
+ - fillColumns
+ - symbolColumns
+ - xDomain
+ - yDomain
+ - xAxisLabel
+ - yAxisLabel
+ - xPrefix
+ - yPrefix
+ - xSuffix
+ - ySuffix
+ properties:
+ type:
+ type: string
+ enum: [scatter]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ type: string
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ xColumn:
+ type: string
+ yColumn:
+ type: string
+ fillColumns:
+ type: array
+ items:
+ type: string
+ symbolColumns:
+ type: array
+ items:
+ type: string
+ xDomain:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ yDomain:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ xAxisLabel:
+ type: string
+ yAxisLabel:
+ type: string
+ xPrefix:
+ type: string
+ xSuffix:
+ type: string
+ yPrefix:
+ type: string
+ ySuffix:
+ type: string
+ HeatmapViewProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - colors
+ - shape
+ - note
+ - showNoteWhenEmpty
+ - xColumn
+ - yColumn
+ - xDomain
+ - yDomain
+ - xAxisLabel
+ - yAxisLabel
+ - xPrefix
+ - yPrefix
+ - xSuffix
+ - ySuffix
+ - binSize
+ properties:
+ type:
+ type: string
+ enum: [heatmap]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ type: string
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ xColumn:
+ type: string
+ yColumn:
+ type: string
+ xDomain:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ yDomain:
+ type: array
+ items:
+ type: number
+ maxItems: 2
+ xAxisLabel:
+ type: string
+ yAxisLabel:
+ type: string
+ xPrefix:
+ type: string
+ xSuffix:
+ type: string
+ yPrefix:
+ type: string
+ ySuffix:
+ type: string
+ binSize:
+ type: number
+ SingleStatViewProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - colors
+ - shape
+ - note
+ - showNoteWhenEmpty
+ - prefix
+ - suffix
+ - legend
+ - decimalPlaces
+ properties:
+ type:
+ type: string
+ enum: [single-stat]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardColor"
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ prefix:
+ type: string
+ suffix:
+ type: string
+ legend:
+ $ref: '#/components/schemas/Legend'
+ decimalPlaces:
+ $ref: "#/components/schemas/DecimalPlaces"
+ HistogramViewProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - colors
+ - shape
+ - note
+ - showNoteWhenEmpty
+ - xColumn
+ - fillColumns
+ - xDomain
+ - xAxisLabel
+ - position
+ - binCount
+ properties:
+ type:
+ type: string
+ enum: [histogram]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardColor"
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ xColumn:
+ type: string
+ fillColumns:
+ type: array
+ items:
+ type: string
+ xDomain:
+ type: array
+ items:
+ type: number
+ format: float
+ xAxisLabel:
+ type: string
+ position:
+ type: string
+ enum: [overlaid, stacked]
+ binCount:
+ type: integer
+ GaugeViewProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - colors
+ - shape
+ - note
+ - showNoteWhenEmpty
+ - prefix
+ - suffix
+ - legend
+ - decimalPlaces
+ properties:
+ type:
+ type: string
+ enum: [gauge]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardColor"
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ prefix:
+ type: string
+ suffix:
+ type: string
+ legend:
+ $ref: '#/components/schemas/Legend'
+ decimalPlaces:
+ $ref: "#/components/schemas/DecimalPlaces"
+ TableViewProperties:
+ type: object
+ required:
+ - type
+ - queries
+ - colors
+ - shape
+ - note
+ - showNoteWhenEmpty
+ - tableOptions
+ - fieldOptions
+ - timeFormat
+ - decimalPlaces
+ properties:
+ type:
+ type: string
+ enum: [table]
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardColor"
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ showNoteWhenEmpty:
+ description: If true, will display note when empty
+ type: boolean
+ tableOptions:
+ properties:
+ verticalTimeAxis:
+ description: >-
+ verticalTimeAxis describes the orientation of the table by
+ indicating whether the time axis will be displayed vertically
+ type: boolean
+ sortBy:
+ $ref: "#/components/schemas/RenamableField"
+ wrapping:
+ description: Wrapping describes the text wrapping style to be used in table views
+ type: string
+ enum:
+ - truncate
+ - wrap
+ - single-line
+ fixFirstColumn:
+ description: >-
+ fixFirstColumn indicates whether the first column of the table
+ should be locked
+ type: boolean
+ fieldOptions:
+ description: >-
+ fieldOptions represent the fields retrieved by the query with
+ customization options
+ type: array
+ items:
+ $ref: '#/components/schemas/RenamableField'
+ timeFormat:
+ description: >-
+ timeFormat describes the display format for time values according to
+ moment.js date formatting
+ type: string
+ decimalPlaces:
+ $ref: '#/components/schemas/DecimalPlaces'
+ MarkdownViewProperties:
+ type: object
+ required:
+ - type
+ - shape
+ - note
+ properties:
+ type:
+ type: string
+ enum: [markdown]
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ note:
+ type: string
+ CheckViewProperties:
+ type: object
+ required:
+ - type
+ - shape
+ - checkID
+ - queries
+ - colors
+ properties:
+ type:
+ type: string
+ enum: [check]
+ shape:
+ type: string
+ enum: ['chronograf-v2']
+ checkID:
+ type: string
+ check:
+ $ref: '#/components/schemas/Check'
+ queries:
+ type: array
+ items:
+ $ref: "#/components/schemas/DashboardQuery"
+ colors:
+ description: Colors define color encoding of data into a visualization
+ type: array
+ items:
+ type: string
+ Axes:
+ description: The viewport for a View's visualizations
+ type: object
+ required: ['x', 'y']
+ properties:
+ x:
+ $ref: '#/components/schemas/Axis'
+ "y": # Quoted to prevent YAML parser from interpreting y as shorthand for true.
+ $ref: '#/components/schemas/Axis'
+ Legend:
+ description: Legend defines the encoding of data into a view's legend
+ type: object
+ properties:
+ type:
+ description: The style of the legend.
+ type: string
+ enum:
+ - static
+ orientation:
+ description: >-
+ orientation is the location of the legend with respect to the view
+ graph
+ type: string
+ enum:
+ - top
+ - bottom
+ - left
+ - right
+ DecimalPlaces:
+ description: Indicates whether decimal places should be enforced, and how many digits to display.
+ type: object
+ properties:
+ isEnforced:
+ description: Indicates whether decimal point setting should be enforced
+ type: boolean
+ digits:
+ description: The number of digits after decimal to display
+ type: integer
+ format: int32
+ ConstantVariableProperties:
+ properties:
+ type:
+ type: string
+ enum: [constant]
+ values:
+ type: array
+ items:
+ type: string
+ MapVariableProperties:
+ properties:
+ type:
+ type: string
+ enum: [map]
+ values:
+ type: object
+ additionalProperties:
+ type: string
+ QueryVariableProperties:
+ properties:
+ type:
+ type: string
+ enum: [query]
+ values:
+ type: object
+ properties:
+ query:
+ type: string
+ language:
+ type: string
+ Variable:
+ type: object
+ required:
+ - name
+ - orgID
+ - arguments
+ properties:
+ links:
+ type: object
+ readOnly: true
+ properties:
+ self:
+ type: string
+ format: uri
+ org:
+ type: string
+ format: uri
+ labels:
+ type: string
+ format: uri
+ id:
+ readOnly: true
+ type: string
+ orgID:
+ type: string
+ name:
+ type: string
+ description:
+ type: string
+ selected:
+ type: array
+ items:
+ type: string
+ labels:
+ $ref: "#/components/schemas/Labels"
+ arguments:
+ type: object
+ oneOf:
+ - $ref: "#/components/schemas/QueryVariableProperties"
+ - $ref: "#/components/schemas/ConstantVariableProperties"
+ - $ref: "#/components/schemas/MapVariableProperties"
+ createdAt:
+ type: string
+ format: date-time
+ updatedAt:
+ type: string
+ format: date-time
+ Variables:
+ type: object
+ example:
+ variables:
+ - id: '1221432'
+ name: ":ok:"
+ selected:
+ - hello
+ arguments:
+ type: constant
+ values:
+ - howdy
+ - hello
+ - hi
+ - yo
+ - oy
+ - id: '1221432'
+ name: ":ok:"
+ selected:
+ - c
+ arguments:
+ type: map
+ values:
+ a: fdjaklfdjkldsfjlkjdsa
+ b: dfaksjfkljekfajekdljfas
+ c: fdjksajfdkfeawfeea
+ - id: '1221432'
+ name: ":ok:"
+ selected:
+ - host
+ arguments:
+ type: query
+ query: 'from(bucket: "foo") |> showMeasurements()'
+ language: flux
+ properties:
+ variables:
+ type: array
+ items:
+ $ref: "#/components/schemas/Variable"
+ ViewProperties:
+ oneOf:
+ - $ref: "#/components/schemas/LinePlusSingleStatProperties"
+ - $ref: "#/components/schemas/XYViewProperties"
+ - $ref: "#/components/schemas/SingleStatViewProperties"
+ - $ref: "#/components/schemas/HistogramViewProperties"
+ - $ref: "#/components/schemas/GaugeViewProperties"
+ - $ref: "#/components/schemas/TableViewProperties"
+ - $ref: "#/components/schemas/MarkdownViewProperties"
+ - $ref: "#/components/schemas/CheckViewProperties"
+ - $ref: "#/components/schemas/ScatterViewProperties"
+ - $ref: "#/components/schemas/HeatmapViewProperties"
+ View:
+ required:
+ - name
+ - properties
+ properties:
+ links:
+ type: object
+ readOnly: true
+ properties:
+ self:
+ type: string
+ id:
+ readOnly: true
+ type: string
+ name:
+ type: string
+ properties:
+ $ref: '#/components/schemas/ViewProperties'
+ Views:
+ type: object
+ properties:
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ views:
+ type: array
+ items:
+ $ref: "#/components/schemas/View"
+ CellUpdate:
+ type: object
+ properties:
+ x:
+ type: integer
+ format: int32
+ "y": # Quoted to prevent YAML parser from interpreting y as shorthand for true.
+ type: integer
+ format: int32
+ w:
+ type: integer
+ format: int32
+ h:
+ type: integer
+ format: int32
+ CreateCell:
+ type: object
+ properties:
+ name:
+ type: string
+ x:
+ type: integer
+ format: int32
+ "y": # Quoted to prevent YAML parser from interpreting y as shorthand for true.
+ type: integer
+ format: int32
+ w:
+ type: integer
+ format: int32
+ h:
+ type: integer
+ format: int32
+ usingView:
+ type: string
+ description: Makes a copy of the provided view.
+ AnalyzeQueryResponse:
+ type: object
+ properties:
+ errors:
+ type: array
+ items:
+ type: object
+ properties:
+ line:
+ type: integer
+ column:
+ type: integer
+ character:
+ type: integer
+ message:
+ type: string
+ Cell:
+ type: object
+ properties:
+ id:
+ type: string
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ view:
+ type: string
+ x:
+ type: integer
+ format: int32
+ "y": # Quoted to prevent YAML parser from interpreting y as shorthand for true.
+ type: integer
+ format: int32
+ w:
+ type: integer
+ format: int32
+ h:
+ type: integer
+ format: int32
+ viewID:
+ type: string
+ description: The reference to a view from the views API.
+ Cells:
+ type: array
+ items:
+ $ref: "#/components/schemas/Cell"
+ Secrets:
+ additionalProperties:
+ type: string
+ example:
+ apikey: abc123xyz
+ SecretKeys:
+ type: object
+ properties:
+ secrets:
+ type: array
+ items:
+ type: string
+ SecretKeysResponse:
+ allOf:
+ - $ref: "#/components/schemas/SecretKeys"
+ - type: object
+ properties:
+ links:
+ readOnly: true
+ type: object
+ properties:
+ self:
+ type: string
+ org:
+ type: string
+ CreateDashboardRequest:
+ properties:
+ orgID:
+ type: string
+ description: The ID of the organization that owns the dashboard.
+ name:
+ type: string
+ description: The user-facing name of the dashboard.
+ description:
+ type: string
+ description: The user-facing description of the dashboard.
+ required:
+ - orgID
+ - name
+ Dashboard:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/CreateDashboardRequest"
+ - type: object
+ properties:
+ links:
+ type: object
+ example:
+ self: "/api/v2/dashboards/1"
+ cells: "/api/v2/dashboards/1/cells"
+ owners: "/api/v2/dashboards/1/owners"
+ members: "/api/v2/dashboards/1/members"
+ logs: "/api/v2/dashboards/1/logs"
+ labels: "/api/v2/dashboards/1/labels"
+ org: "/api/v2/labels/1"
+ properties:
+ self:
+ $ref: "#/components/schemas/Link"
+ cells:
+ $ref: "#/components/schemas/Link"
+ members:
+ $ref: "#/components/schemas/Link"
+ owners:
+ $ref: "#/components/schemas/Link"
+ logs:
+ $ref: "#/components/schemas/Link"
+ labels:
+ $ref: "#/components/schemas/Link"
+ org:
+ $ref: "#/components/schemas/Link"
+ id:
+ readOnly: true
+ type: string
+ meta:
+ type: object
+ properties:
+ createdAt:
+ type: string
+ format: date-time
+ updatedAt:
+ type: string
+ format: date-time
+ cells:
+ $ref: "#/components/schemas/Cells"
+ labels:
+ $ref: "#/components/schemas/Labels"
+ Dashboards:
+ type: object
+ properties:
+ links:
+ $ref: "#/components/schemas/Links"
+ dashboards:
+ type: array
+ items:
+ $ref: "#/components/schemas/Dashboard"
+ Source:
+ type: object
+ properties:
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ query:
+ type: string
+ health:
+ type: string
+ buckets:
+ type: string
+ id:
+ type: string
+ orgID:
+ type: string
+ default:
+ type: boolean
+ name:
+ type: string
+ type:
+ type: string
+ enum: ["v1","v2","self"]
+ url:
+ type: string
+ format: uri
+ insecureSkipVerify:
+ type: boolean
+ telegraf:
+ type: string
+ token:
+ type: string
+ username:
+ type: string
+ password:
+ type: string
+ sharedSecret:
+ type: string
+ metaUrl:
+ type: string
+ format: uri
+ defaultRP:
+ type: string
+ languages:
+ type: array
+ readOnly: true
+ items:
+ type: string
+ enum:
+ - flux
+ - influxql
+ Sources:
+ type: object
+ properties:
+ links:
+ type: object
+ properties:
+ self:
+ type: string
+ format: uri
+ sources:
+ type: array
+ items:
+ $ref: "#/components/schemas/Source"
+ ScraperTargetRequest:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The name of the scraper target.
+ type:
+ type: string
+ description: The type of the metrics to be parsed.
+ enum: [prometheus]
+ url:
+ type: string
+ description: The URL of the metrics endpoint.
+ example: http://localhost:9090/metrics
+ orgID:
+ type: string
+ description: The organization ID.
+ bucketID:
+ type: string
+ description: The ID of the bucket to write to.
+ ScraperTargetResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ScraperTargetRequest"
+ - type: object
+ properties:
+ id:
+ type: string
+ readOnly: true
+ org:
+ type: string
+ description: The organization name.
+ bucket:
+ type: string
+ description: The bucket name.
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/scrapers/1"
+ owners: "/api/v2/scrapers/1/owners"
+ members: "/api/v2/scrapers/1/members"
+ bucket: "/api/v2/buckets/1"
+ organization: "/api/v2/orgs/1"
+ properties:
+ self:
+ $ref: "#/components/schemas/Link"
+ members:
+ $ref: "#/components/schemas/Link"
+ owners:
+ $ref: "#/components/schemas/Link"
+ bucket:
+ $ref: "#/components/schemas/Link"
+ organization:
+ $ref: "#/components/schemas/Link"
+ ScraperTargetResponses:
+ type: object
+ properties:
+ configurations:
+ type: array
+ items:
+ $ref: "#/components/schemas/ScraperTargetResponse"
+ DocumentMeta:
+ type: object
+ properties:
+ name:
+ type: string
+ type:
+ type: string
+ templateID:
+ type: string
+ description:
+ type: string
+ version:
+ type: string
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ required:
+ - name
+ - version
+ Document:
+ type: object
+ properties:
+ id:
+ type: string
+ readOnly: true
+ meta:
+ $ref: "#/components/schemas/DocumentMeta"
+ content:
+ type: object
+ labels:
+ $ref: "#/components/schemas/Labels"
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/documents/templates/1"
+ properties:
+ self:
+ description: The document URL.
+ $ref: "#/components/schemas/Link"
+ required:
+ - id
+ - meta
+ - content
+ DocumentCreate:
+ type: object
+ properties:
+ meta:
+ $ref: "#/components/schemas/DocumentMeta"
+ content:
+ type: object
+ org:
+ type: string
+ description: The organization name. Specify either `orgID` or `org`.
+ orgID:
+ type: string
+ description: The organization ID. Specify either `orgID` or `org`.
+ labels:
+ type: array
+ description: An array of label IDs to be added as labels to the document.
+ items:
+ type: string
+ required:
+ - meta
+ - content
+ DocumentUpdate:
+ type: object
+ properties:
+ meta:
+ $ref: "#/components/schemas/DocumentMeta"
+ content:
+ type: object
+ DocumentListEntry:
+ type: object
+ properties:
+ id:
+ type: string
+ readOnly: true
+ meta:
+ $ref: "#/components/schemas/DocumentMeta"
+ labels:
+ $ref: "#/components/schemas/Labels"
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/documents/templates/1"
+ properties:
+ self:
+ description: The document URL.
+ $ref: "#/components/schemas/Link"
+ required:
+ - id
+ - meta
+ Documents:
+ type: object
+ properties:
+ documents:
+ type: array
+ items:
+ $ref: "#/components/schemas/DocumentListEntry"
+ TelegrafRequest:
+ type: object
+ properties:
+ name:
+ type: string
+ description:
+ type: string
+ agent:
+ type: object
+ properties:
+ collectionInterval:
+ type: integer
+ plugins:
+ type: array
+ items:
+ $ref: "#/components/schemas/TelegrafRequestPlugin"
+ orgID:
+ type: string
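+ # TelegrafRequestPlugin is a union (oneOf) of the supported Telegraf input and output plugin schemas.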
+ TelegrafRequestPlugin:
+ oneOf:
+ - $ref: '#/components/schemas/TelegrafPluginInputCpu'
+ - $ref: '#/components/schemas/TelegrafPluginInputDisk'
+ - $ref: '#/components/schemas/TelegrafPluginInputDiskio'
+ - $ref: '#/components/schemas/TelegrafPluginInputDocker'
+ - $ref: '#/components/schemas/TelegrafPluginInputFile'
+ - $ref: '#/components/schemas/TelegrafPluginInputKubernetes'
+ - $ref: '#/components/schemas/TelegrafPluginInputLogParser'
+ - $ref: '#/components/schemas/TelegrafPluginInputProcstat'
+ - $ref: '#/components/schemas/TelegrafPluginInputPrometheus'
+ - $ref: '#/components/schemas/TelegrafPluginInputRedis'
+ - $ref: '#/components/schemas/TelegrafPluginInputSyslog'
+ - $ref: '#/components/schemas/TelegrafPluginOutputFile'
+ - $ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2'
+ TelegrafPluginInputCpu:
+ type: object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["cpu"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputDisk:
+ type: object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["disk"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputDiskio:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["diskio"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputDocker:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["docker"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputDockerConfig'
+ TelegrafPluginInputFile:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["file"]
+ type:
+ type: string
+ enum: [input]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputFileConfig'
+ TelegrafPluginInputKernel:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["kernel"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputKubernetes:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["kubernetes"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputKubernetesConfig'
+ TelegrafPluginInputLogParser:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["logparser"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputLogParserConfig'
+ TelegrafPluginInputMem:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["mem"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputNetResponse:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["net_response"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputNet:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["net"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputNginx:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["nginx"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputProcesses:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["processes"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputProcstat:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["procstat"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputProcstatConfig'
+ TelegrafPluginInputPrometheus:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["prometheus"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputPrometheusConfig'
+ TelegrafPluginInputRedis:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["redis"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputRedisConfig'
+ TelegrafPluginInputSyslog:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["syslog"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginInputSyslogConfig'
+ TelegrafPluginInputSwap:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["swap"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputSystem:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["system"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginInputTail:
+ type:
+ object
+ required:
+ - name
+ - type
+ properties:
+ name:
+ type: string
+ enum: ["tail"]
+ type:
+ type: string
+ enum: ["input"]
+ comment:
+ type: string
+ TelegrafPluginOutputFile:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["file"]
+ type:
+ type: string
+ enum: ["output"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginOutputFileConfig'
+ TelegrafPluginOutputInfluxDBV2:
+ type:
+ object
+ required:
+ - name
+ - type
+ - config
+ properties:
+ name:
+ type: string
+ enum: ["influxdb_v2"]
+ type:
+ type: string
+ enum: ["output"]
+ comment:
+ type: string
+ config:
+ $ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2Config'
+ Telegraf:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/TelegrafRequest"
+ - type: object
+ properties:
+ id:
+ type: string
+ readOnly: true
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/telegrafs/1"
+ labels: "/api/v2/telegrafs/1/labels"
+ owners: "/api/v2/telegrafs/1/owners"
+ members: "/api/v2/telegrafs/1/members"
+ properties:
+ self:
+ $ref: "#/components/schemas/Link"
+ labels:
+ $ref: "#/components/schemas/Link"
+ members:
+ $ref: "#/components/schemas/Link"
+ owners:
+ $ref: "#/components/schemas/Link"
+ labels:
+ readOnly: true
+ $ref: "#/components/schemas/Labels"
+ Telegrafs:
+ type: object
+ properties:
+ configurations:
+ type: array
+ items:
+ $ref: "#/components/schemas/Telegraf"
+ TelegrafPluginInputDockerConfig:
+ type: object
+ required:
+ - endpoint
+ properties:
+ endpoint:
+ type: string
+ TelegrafPluginInputFileConfig:
+ type: object
+ properties:
+ files:
+ type: array
+ items:
+ type: string
+ TelegrafPluginInputKubernetesConfig:
+ type: object
+ properties:
+ url:
+ type: string
+ format: uri
+ TelegrafPluginInputLogParserConfig:
+ type: object
+ properties:
+ files:
+ type: array
+ items:
+ type: string
+ TelegrafPluginInputProcstatConfig:
+ type: object
+ properties:
+ exe:
+ type: string
+ TelegrafPluginInputPrometheusConfig:
+ type: object
+ properties:
+ urls:
+ type: array
+ items:
+ type: string
+ format: uri
+ TelegrafPluginInputRedisConfig:
+ type: object
+ properties:
+ servers:
+ type: array
+ items:
+ type: string
+ password:
+ type: string
+ TelegrafPluginInputSyslogConfig:
+ type: object
+ properties:
+ server:
+ type: string
+ TelegrafPluginOutputFileConfig:
+ type: object
+ required:
+ - files
+ properties:
+ files:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ enum: [stdout, path]
+ path:
+ type: string
+ TelegrafPluginOutputInfluxDBV2Config:
+ type: object
+ required:
+ - urls
+ - token
+ - organization
+ - bucket
+ properties:
+ urls:
+ type: array
+ items:
+ type: string
+ format: uri
+ token:
+ type: string
+ organization:
+ type: string
+ bucket:
+ type: string
+ IsOnboarding:
+ type: object
+ properties:
+ allowed:
+ description: True means that the InfluxDB instance has NOT had initial setup; false means that the instance has already been set up.
+ type: boolean
+ OnboardingRequest:
+ type: object
+ properties:
+ username:
+ type: string
+ password:
+ type: string
+ org:
+ type: string
+ bucket:
+ type: string
+ retentionPeriodHrs:
+ type: integer
+ required:
+ - username
+ - password
+ - org
+ - bucket
+ OnboardingResponse:
+ type: object
+ properties:
+ user:
+ $ref: "#/components/schemas/User"
+ org:
+ $ref: "#/components/schemas/Organization"
+ bucket:
+ $ref: "#/components/schemas/Bucket"
+ auth:
+ $ref: "#/components/schemas/Authorization"
+ PasswordResetBody:
+ properties:
+ password:
+ type: string
+ required:
+ - password
+ AddResourceMemberRequestBody:
+ type: object
+ properties:
+ id:
+ type: string
+ name:
+ type: string
+ required:
+ - id
+ Ready:
+ type: object
+ properties:
+ status:
+ type: string
+ enum:
+ - ready
+ started:
+ type: string
+ format: date-time
+ example: "2019-03-13T10:09:33.891196-04:00"
+ up:
+ type: string
+ example: "14m45.911966424s"
+ HealthCheck:
+ type: object
+ required:
+ - name
+ - status
+ properties:
+ name:
+ type: string
+ message:
+ type: string
+ checks:
+ type: array
+ items:
+ $ref: "#/components/schemas/HealthCheck"
+ status:
+ type: string
+ enum:
+ - pass
+ - fail
+ Labels:
+ type: array
+ items:
+ $ref: "#/components/schemas/Label"
+ Label:
+ type: object
+ properties:
+ id:
+ readOnly: true
+ type: string
+ orgID:
+ readOnly: true
+ type: string
+ name:
+ type: string
+ properties:
+ type: object
+ additionalProperties:
+ type: string
+ description: Key/Value pairs associated with this label. Keys can be removed by sending an update with an empty value.
+ example: {"color": "ffb3b3", "description": "this is a description"}
+ LabelCreateRequest:
+ type: object
+ required: [orgID]
+ properties:
+ orgID:
+ type: string
+ name:
+ type: string
+ properties:
+ type: object
+ additionalProperties:
+ type: string
+ description: Key/Value pairs associated with this label. Keys can be removed by sending an update with an empty value.
+ example: {"color": "ffb3b3", "description": "this is a description"}
+ LabelUpdate:
+ type: object
+ properties:
+ name:
+ type: string
+ properties:
+ type: object
+ description: Key/Value pairs associated with this label. Keys can be removed by sending an update with an empty value.
+ example: {"color": "ffb3b3", "description": "this is a description"}
+ LabelMapping:
+ type: object
+ properties:
+ labelID:
+ type: string
+ LabelsResponse:
+ type: object
+ properties:
+ labels:
+ $ref: "#/components/schemas/Labels"
+ links:
+ $ref: "#/components/schemas/Links"
+ LabelResponse:
+ type: object
+ properties:
+ label:
+ $ref: "#/components/schemas/Label"
+ links:
+ $ref: "#/components/schemas/Links"
+ ASTResponse:
+ description: Contains the AST for the supplied Flux query
+ type: object
+ properties:
+ ast:
+ $ref: "#/components/schemas/Package"
+ WritePrecision:
+ type: string
+ enum:
+ - ms
+ - s
+ - us
+ - ns
+ TaskCreateRequest:
+ type: object
+ properties:
+ type:
+ description: The type of task. This can be used for filtering tasks on list actions.
+ type: string
+ orgID:
+ description: The ID of the organization that owns this Task.
+ type: string
+ org:
+ description: The name of the organization that owns this Task.
+ type: string
+ status:
+ $ref: "#/components/schemas/TaskStatusType"
+ flux:
+ description: The Flux script to run for this task.
+ type: string
+ description:
+ description: An optional description of the task.
+ type: string
+ required: [flux]
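+ # Illustrative example of a minimal create request body (org and bucket names are hypothetical):
+ # {"org": "my-org", "flux": "option task = {name: \"example\", every: 1h}\nfrom(bucket: \"telegraf\") |> range(start: -task.every)"}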
+ TaskUpdateRequest:
+ type: object
+ properties:
+ status:
+ $ref: "#/components/schemas/TaskStatusType"
+ flux:
+ description: The Flux script to run for this task.
+ type: string
+ name:
+ description: Override the 'name' option in the Flux script.
+ type: string
+ every:
+ description: Override the 'every' option in the Flux script.
+ type: string
+ cron:
+ description: Override the 'cron' option in the Flux script.
+ type: string
+ offset:
+ description: Override the 'offset' option in the Flux script.
+ type: string
+ description:
+ description: An optional description of the task.
+ type: string
+ FluxResponse:
+ description: Rendered flux that backs the check or notification.
+ properties:
+ flux:
+ type: string
+ CheckPatch:
+ type: object
+ properties:
+ name:
+ type: string
+ description:
+ type: string
+ status:
+ type: string
+ enum:
+ - active
+ - inactive
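+ # Check is polymorphic; the `type` discriminator selects either the deadman or threshold schema.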
+ Check:
+ oneOf:
+ - $ref: "#/components/schemas/DeadmanCheck"
+ - $ref: "#/components/schemas/ThresholdCheck"
+ discriminator:
+ propertyName: type
+ mapping:
+ deadman: "#/components/schemas/DeadmanCheck"
+ threshold: "#/components/schemas/ThresholdCheck"
+ Checks:
+ properties:
+ checks:
+ type: array
+ items:
+ $ref: "#/components/schemas/Check"
+ links:
+ $ref: "#/components/schemas/Links"
+ CheckBase:
+ properties:
+ id:
+ readOnly: true
+ type: string
+ name:
+ type: string
+ orgID:
+ description: The ID of the organization that owns this check.
+ type: string
+ ownerID:
+ description: The ID of the creator of this check.
+ type: string
+ readOnly: true
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ query:
+ $ref: "#/components/schemas/DashboardQuery"
+ status:
+ $ref: "#/components/schemas/TaskStatusType"
+ every:
+ description: Check repetition interval.
+ type: string
+ offset:
+ description: Duration to delay after the schedule, before executing check.
+ type: string
+ tags:
+ description: List of tags to write to each status.
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ value:
+ type: string
+ description:
+ description: An optional description of the check.
+ type: string
+ statusMessageTemplate:
+ description: The template used to generate and write a status message.
+ type: string
+ labels:
+ $ref: "#/components/schemas/Labels"
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/checks/1"
+ labels: "/api/v2/checks/1/labels"
+ members: "/api/v2/checks/1/members"
+ owners: "/api/v2/checks/1/owners"
+ properties:
+ self:
+ description: URL for this check
+ $ref: "#/components/schemas/Link"
+ labels:
+ description: URL to retrieve labels for this check
+ $ref: "#/components/schemas/Link"
+ members:
+ description: URL to retrieve members for this check
+ $ref: "#/components/schemas/Link"
+ owners:
+ description: URL to retrieve owners for this check
+ $ref: "#/components/schemas/Link"
+ required: [name, type, orgID, query]
+ ThresholdCheck:
+ allOf:
+ - $ref: "#/components/schemas/CheckBase"
+ - type: object
+ properties:
+ type:
+ type: string
+ enum: [threshold]
+ thresholds:
+ type: array
+ items:
+ $ref: "#/components/schemas/Threshold"
+ Threshold:
+ oneOf:
+ - $ref: "#/components/schemas/GreaterThreshold"
+ - $ref: "#/components/schemas/LesserThreshold"
+ - $ref: "#/components/schemas/RangeThreshold"
+ discriminator:
+ propertyName: type
+ mapping:
+ greater: "#/components/schemas/GreaterThreshold"
+ lesser: "#/components/schemas/LesserThreshold"
+ range: "#/components/schemas/RangeThreshold"
+ DeadmanCheck:
+ allOf:
+ - $ref: "#/components/schemas/CheckBase"
+ - type: object
+ properties:
+ type:
+ type: string
+ enum: [deadman]
+ timeSince:
+ description: String duration before deadman triggers.
+ type: string
+ staleTime:
+ description: String duration for time that a series is considered stale and should not trigger deadman.
+ type: string
+ reportZero:
+ description: If only zero values are reported since time, trigger an alert
+ type: boolean
+ level:
+ $ref: "#/components/schemas/CheckStatusLevel"
+ ThresholdBase:
+ properties:
+ level:
+ $ref: "#/components/schemas/CheckStatusLevel"
+ allValues:
+ description: If true, only alert if all values meet threshold.
+ type: boolean
+ GreaterThreshold:
+ allOf:
+ - $ref: "#/components/schemas/ThresholdBase"
+ - type: object
+ required: [type, value]
+ properties:
+ type:
+ type: string
+ enum: [greater]
+ value:
+ type: number
+ format: float
+ LesserThreshold:
+ allOf:
+ - $ref: "#/components/schemas/ThresholdBase"
+ - type: object
+ required: [type, value]
+ properties:
+ type:
+ type: string
+ enum: [lesser]
+ value:
+ type: number
+ format: float
+ RangeThreshold:
+ allOf:
+ - $ref: "#/components/schemas/ThresholdBase"
+ - type: object
+ required: [type, min, max, within]
+ properties:
+ type:
+ type: string
+ enum: [range]
+ min:
+ type: number
+ format: float
+ max:
+ type: number
+ format: float
+ within:
+ type: boolean
+ CheckStatusLevel:
+ description: The state to record if the check matches the criteria.
+ type: string
+ enum: ["UNKNOWN", "OK", "INFO", "CRIT", "WARN"]
+ RuleStatusLevel:
+ description: The state to record if the check matches the criteria.
+ type: string
+ enum: ["UNKNOWN", "OK", "INFO", "CRIT", "WARN", "ANY"]
+ NotificationRuleUpdate:
+ type: object
+ properties:
+ name:
+ type: string
+ description:
+ type: string
+ status:
+ type: string
+ enum:
+ - active
+ - inactive
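+ # NotificationRule is polymorphic; the `type` discriminator selects the Slack, SMTP, PagerDuty, or HTTP schema.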
+ NotificationRule:
+ oneOf:
+ - $ref: "#/components/schemas/SlackNotificationRule"
+ - $ref: "#/components/schemas/SMTPNotificationRule"
+ - $ref: "#/components/schemas/PagerDutyNotificationRule"
+ - $ref: "#/components/schemas/HTTPNotificationRule"
+ discriminator:
+ propertyName: type
+ mapping:
+ slack: "#/components/schemas/SlackNotificationRule"
+ smtp: "#/components/schemas/SMTPNotificationRule"
+ pagerduty: "#/components/schemas/PagerDutyNotificationRule"
+ http: "#/components/schemas/HTTPNotificationRule"
+ NotificationRules:
+ properties:
+ notificationRules:
+ type: array
+ items:
+ $ref: "#/components/schemas/NotificationRule"
+ links:
+ $ref: "#/components/schemas/Links"
+ NotificationRuleBase:
+ type: object
+ required:
+ - id
+ - orgID
+ - status
+ - name
+ - tagRules
+ - statusRules
+ - endpointID
+ properties:
+ id:
+ readOnly: true
+ type: string
+ endpointID:
+ type: string
+ orgID:
+ description: The ID of the organization that owns this notification rule.
+ type: string
+ ownerID:
+ description: The ID of the creator of this notification rule.
+ type: string
+ readOnly: true
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ status:
+ $ref: "#/components/schemas/TaskStatusType"
+ name:
+ description: Human-readable name describing the notification rule.
+ type: string
+ sleepUntil:
+ type: string
+ every:
+ description: The notification repetition interval.
+ type: string
+ offset:
+ description: Duration to delay after the schedule, before executing check.
+ type: string
+ runbookLink:
+ type: string
+ limitEvery:
+ description: Don't notify me more than `limit` times every `limitEvery` seconds. If set, limit cannot be empty.
+ type: integer
+ limit:
+ description: Don't notify me more than `limit` times every `limitEvery` seconds. If set, limitEvery cannot be empty.
+ type: integer
+ tagRules:
+ description: List of tag rules the notification rule attempts to match.
+ type: array
+ items:
+ $ref: "#/components/schemas/TagRule"
+ description:
+ description: An optional description of the notification rule.
+ type: string
+ statusRules:
+ description: List of status rules the notification rule attempts to match.
+ type: array
+ minItems: 1
+ items:
+ $ref: "#/components/schemas/StatusRule"
+ labels:
+ $ref: "#/components/schemas/Labels"
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/notificationRules/1"
+ labels: "/api/v2/notificationRules/1/labels"
+ members: "/api/v2/notificationRules/1/members"
+ owners: "/api/v2/notificationRules/1/owners"
+ properties:
+ self:
+ description: URL for this notification rule.
+ $ref: "#/components/schemas/Link"
+ labels:
+ description: URL to retrieve labels for this notification rule.
+ $ref: "#/components/schemas/Link"
+ members:
+ description: URL to retrieve members for this notification rule.
+ $ref: "#/components/schemas/Link"
+ owners:
+ description: URL to retrieve owners for this notification rule.
+ $ref: "#/components/schemas/Link"
+ TagRule:
+ type: object
+ properties:
+ key:
+ type: string
+ value:
+ type: string
+ operator:
+ type: string
+ enum: ["equal", "notequal", "equalregex","notequalregex"]
+ StatusRule:
+ type: object
+ properties:
+ currentLevel:
+ $ref: "#/components/schemas/RuleStatusLevel"
+ previousLevel:
+ $ref: "#/components/schemas/RuleStatusLevel"
+ count:
+ type: integer
+ period:
+ type: string
+ HTTPNotificationRuleBase:
+ type: object
+ required: [type, url]
+ properties:
+ type:
+ type: string
+ enum: [http]
+ url:
+ type: string
+ HTTPNotificationRule:
+ allOf:
+ - $ref: "#/components/schemas/NotificationRuleBase"
+ - $ref: "#/components/schemas/HTTPNotificationRuleBase"
+ SlackNotificationRuleBase:
+ type: object
+ required: [type, messageTemplate]
+ properties:
+ type:
+ type: string
+ enum: [slack]
+ channel:
+ type: string
+ messageTemplate:
+ type: string
+ SlackNotificationRule:
+ allOf:
+ - $ref: "#/components/schemas/NotificationRuleBase"
+ - $ref: "#/components/schemas/SlackNotificationRuleBase"
+ SMTPNotificationRule:
+ allOf:
+ - $ref: "#/components/schemas/NotificationRuleBase"
+ - $ref: "#/components/schemas/SMTPNotificationRuleBase"
+ SMTPNotificationRuleBase:
+ type: object
+ required: [type, subjectTemplate, to]
+ properties:
+ type:
+ type: string
+ enum: [smtp]
+ subjectTemplate:
+ type: string
+ bodyTemplate:
+ type: string
+ to:
+ type: string
+ PagerDutyNotificationRule:
+ allOf:
+ - $ref: "#/components/schemas/NotificationRuleBase"
+ - $ref: "#/components/schemas/PagerDutyNotificationRuleBase"
+ PagerDutyNotificationRuleBase:
+ type: object
+ required: [type, messageTemplate]
+ properties:
+ type:
+ type: string
+ enum: [pagerduty]
+ messageTemplate:
+ type: string
+ NotificationEndpointUpdate:
+ type: object
+ properties:
+ name:
+ type: string
+ description:
+ type: string
+ status:
+ type: string
+ enum:
+ - active
+ - inactive
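+ # NotificationEndpoint is polymorphic; the `type` discriminator selects the Slack, PagerDuty, or HTTP schema.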
+ NotificationEndpoint:
+ oneOf:
+ - $ref: "#/components/schemas/SlackNotificationEndpoint"
+ - $ref: "#/components/schemas/PagerDutyNotificationEndpoint"
+ - $ref: "#/components/schemas/HTTPNotificationEndpoint"
+ discriminator:
+ propertyName: type
+ mapping:
+ slack: "#/components/schemas/SlackNotificationEndpoint"
+ pagerduty: "#/components/schemas/PagerDutyNotificationEndpoint"
+ http: "#/components/schemas/HTTPNotificationEndpoint"
+ NotificationEndpoints:
+ properties:
+ notificationEndpoints:
+ type: array
+ items:
+ $ref: "#/components/schemas/NotificationEndpoint"
+ links:
+ $ref: "#/components/schemas/Links"
+ NotificationEndpointBase:
+ type: object
+ required: [type, name]
+ properties:
+ id:
+ type: string
+ orgID:
+ type: string
+ userID:
+ type: string
+ createdAt:
+ type: string
+ format: date-time
+ readOnly: true
+ updatedAt:
+ type: string
+ format: date-time
+ readOnly: true
+ description:
+ description: An optional description of the notification endpoint.
+ type: string
+ name:
+ type: string
+ status:
+ description: The status of the endpoint.
+ default: active
+ type: string
+ enum: ["active", "inactive"]
+ labels:
+ $ref: "#/components/schemas/Labels"
+ links:
+ type: object
+ readOnly: true
+ example:
+ self: "/api/v2/notificationEndpoints/1"
+ labels: "/api/v2/notificationEndpoints/1/labels"
+ members: "/api/v2/notificationEndpoints/1/members"
+ owners: "/api/v2/notificationEndpoints/1/owners"
+ properties:
+ self:
+ description: URL for this endpoint.
+ $ref: "#/components/schemas/Link"
+ labels:
+ description: URL to retrieve labels for this endpoint.
+ $ref: "#/components/schemas/Link"
+ members:
+ description: URL to retrieve members for this endpoint.
+ $ref: "#/components/schemas/Link"
+ owners:
+ description: URL to retrieve owners for this endpoint.
+ $ref: "#/components/schemas/Link"
+ type:
+ $ref: "#/components/schemas/NotificationEndpointType"
+ SlackNotificationEndpoint:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/NotificationEndpointBase"
+ - type: object
+ properties:
+ url:
+ description: Specifies the URL of the Slack endpoint. Specify either `URL` or `Token`.
+ type: string
+ token:
+ description: Specifies the API token string. Specify either `URL` or `Token`.
+ type: string
+ PagerDutyNotificationEndpoint:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/NotificationEndpointBase"
+ - type: object
+ required: [clientURL, routingKey]
+ properties:
+ clientURL:
+ type: string
+ routingKey:
+ type: string
+ HTTPNotificationEndpoint:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/NotificationEndpointBase"
+ - type: object
+ required: [url, authMethod, method]
+ properties:
+ url:
+ type: string
+ username:
+ type: string
+ password:
+ type: string
+ token:
+ type: string
+ method:
+ type: string
+ enum: ['POST', 'GET', 'PUT']
+ authMethod:
+ type: string
+ enum: ['none', 'basic', 'bearer']
+ contentTemplate:
+ type: string
+ headers:
+ type: object
+ description: Customized headers.
+ additionalProperties:
+ type: string
+ NotificationEndpointType:
+ type: string
+ enum: ['slack', 'pagerduty', 'http']
+ securitySchemes:
+ BasicAuth:
+ type: http
+ scheme: basic
diff --git a/assets/js/content-interactions.js b/assets/js/content-interactions.js
index ac2e10425..b2a5d4262 100644
--- a/assets/js/content-interactions.js
+++ b/assets/js/content-interactions.js
@@ -35,9 +35,9 @@ $('.article a[href^="#"]:not(' + elementWhiteList + ')').click(function (e) {
///////////////////////////// Left Nav Interactions /////////////////////////////
$(".children-toggle").click(function(e) {
- e.preventDefault()
- $(this).toggleClass('open');
- $(this).siblings('.children').toggleClass('open');
+ e.preventDefault()
+ $(this).toggleClass('open');
+ $(this).siblings('.children').toggleClass('open');
})
//////////////////////////// Mobile Contents Toggle ////////////////////////////
@@ -52,28 +52,28 @@ $('#contents-toggle-btn').click(function(e) {
function tabbedContent(container, tab, content) {
- // Add the active class to the first tab in each tab group,
- // in case it wasn't already set in the markup.
- $(container).each(function () {
- $(tab, this).removeClass('is-active');
- $(tab + ':first', this).addClass('is-active');
- });
+ // Add the active class to the first tab in each tab group,
+ // in case it wasn't already set in the markup.
+ $(container).each(function () {
+ $(tab, this).removeClass('is-active');
+ $(tab + ':first', this).addClass('is-active');
+ });
- $(tab).on('click', function(e) {
- e.preventDefault();
+ $(tab).on('click', function(e) {
+ e.preventDefault();
- // Make sure the tab being clicked is marked as active, and make the rest inactive.
- $(this).addClass('is-active').siblings().removeClass('is-active');
+ // Make sure the tab being clicked is marked as active, and make the rest inactive.
+ $(this).addClass('is-active').siblings().removeClass('is-active');
- // Render the correct tab content based on the position of the tab being clicked.
- const activeIndex = $(tab).index(this);
- $(content).each(function(i) {
- if (i === activeIndex) {
- $(this).show();
- $(this).siblings(content).hide();
- }
- });
- });
+ // Render the correct tab content based on the position of the tab being clicked.
+ const activeIndex = $(tab).index(this);
+ $(content).each(function(i) {
+ if (i === activeIndex) {
+ $(this).show();
+ $(this).siblings(content).hide();
+ }
+ });
+ });
}
tabbedContent('.code-tabs-wrapper', '.code-tabs p a', '.code-tab-content');
@@ -82,8 +82,8 @@ tabbedContent('.tabs-wrapper', '.tabs p a', '.tab-content');
/////////////////////////////// Truncate Content ///////////////////////////////
$(".truncate-toggle").click(function(e) {
- e.preventDefault()
- $(this).closest('.truncate').toggleClass('closed');
+ e.preventDefault()
+ $(this).closest('.truncate').toggleClass('closed');
})
//////////////////// Replace Missing Images with Placeholder ///////////////////
@@ -92,3 +92,11 @@ $(".article--content img").on("error", function() {
$(this).attr("src", "/img/coming-soon.svg");
$(this).attr("style", "max-width:500px;");
});
+
+////////////////////////// Inject tooltips on load //////////////////////////////
+
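+// Build a tooltip element from each .tooltip element's data-tooltip-text attribute and prepend it.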
+$('.tooltip').each( function(){
+ $toolTipText = $('<div/>').addClass('tooltip-text').text($(this).attr('data-tooltip-text'));
+ $toolTipElement = $('<div/>').addClass('tooltip-container').append($toolTipText);
+ $(this).prepend($toolTipElement);
+});
diff --git a/assets/js/telegraf-filters.js b/assets/js/telegraf-filters.js
new file mode 100644
index 000000000..1c17a2702
--- /dev/null
+++ b/assets/js/telegraf-filters.js
@@ -0,0 +1,50 @@
+// Count tag elements
+function countTag(tag) {
+ return $(".visible[data-tags*='" + tag + "']").length
+}
+
+function getFilterCounts() {
+ $('#plugin-filters label').each(function() {
+ var tagName = $('input', this).attr('name').replace(/[\W]+/, "-");
+ var tagCount = countTag(tagName);
+ $(this).attr('data-count', '(' + tagCount + ')');
+ if (tagCount <= 0) {
+ $(this).fadeTo(200, 0.25);
+ } else {
+ $(this).fadeTo(400, 1.0);
+ }
+ })
+}
+
+// Get initial filter count on page load
+getFilterCounts()
+
+$("#plugin-filters input").click(function() {
+
+ // List of tags to hide
+ var tagArray = $("#plugin-filters input:checkbox:checked").map(function(){
+ return $(this).attr('name').replace(/[\W]+/, "-");
+ }).get();
+
+ // List of tags to restore
+ var restoreArray = $("#plugin-filters input:checkbox:not(:checked)").map(function(){
+ return $(this).attr('name').replace(/[\W]+/, "-");
+ }).get();
+
+ // Actions for filter select
+ if ( $(this).is(':checked') ) {
+ $.each( tagArray, function( index, value ) {
+ $(".plugin-card.visible:not([data-tags~='" + value + "'])").removeClass('visible').fadeOut()
+ })
+ } else {
+ $.each( restoreArray, function( index, value ) {
+ $(".plugin-card:not(.visible)[data-tags~='" + value + "']").addClass('visible').fadeIn()
+ })
+ $.each( tagArray, function( index, value ) {
+ $(".plugin-card.visible:not([data-tags~='" + value + "'])").removeClass('visible').hide()
+ })
+ }
+
+ // Refresh filter count
+ getFilterCounts()
+});
diff --git a/assets/styles/layouts/_api-overrides.scss b/assets/styles/layouts/_api-overrides.scss
new file mode 100644
index 000000000..080b3d918
--- /dev/null
+++ b/assets/styles/layouts/_api-overrides.scss
@@ -0,0 +1,266 @@
+@import "tools/color-palette";
+@import "tools/icomoon";
+
+// Fonts
+$rubik: 'Rubik', sans-serif;
+$roboto: 'Roboto', sans-serif;
+$roboto-mono: 'Roboto Mono', monospace;
+
+// Font weights
+$medium: 500;
+$bold: 700;
+
+//////////////////////////////////// LOADER ////////////////////////////////////
+
+#loading {
+ position: fixed;
+ width: 100vw;
+ height: 100vh;
+ z-index: 1000;
+ background-color: $g20-white;
+ opacity: 1;
+ transition: opacity .5s;
+}
+
+@keyframes spinner {
+ to {transform: rotate(360deg);}
+}
+
+.spinner:before {
+ content: '';
+ box-sizing: border-box;
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 50px;
+ height: 50px;
+ margin-top: -25px;
+ margin-left: -25px;
+ border-radius: 50%;
+ border: 3px solid $g16-pearl;
+ border-top-color: $cp-comet;
+ animation: spinner .6s linear infinite;
+}
+
+//////////////////////////////// InfluxDB Header ///////////////////////////////
+
+#influx-header {
+ font-family: $rubik;
+ padding: 15px 20px ;
+ display: block;
+ background-color: $wp-violentdark;
+ a {
+ color: $g20-white;
+ text-decoration: none;
+ transition: color .2s;
+ &:hover {
+ color: $b-pool;
+ }
+ &:before {
+ content: '\e918';
+ font-family: 'icomoon';
+ margin-right: .65rem;
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+.cjtbAK {
+ h1,h2,h3,h4,h5,h6,
+ p,li,th,td {
+ font-family: $rubik !important;
+ }
+}
+
+#redoc {
+ h1,h2,h3,h4,h5,h6 {
+ font-weight: $medium !important;
+ }
+}
+
+// Section title padding
+.dluJDj {
+ padding: 20px 0;
+}
+
+// Page h1
+.dTJWQH {
+ color: $g7-graphite;
+ font-size: 2rem;
+}
+
+// Download button
+.jIdpVJ {
+ background: $b-dodger;
+ color: $g20-white;
+ border: none;
+ border-radius: 3px;
+ font-family: $rubik;
+ font-size: .85rem;
+ font-weight: $medium;
+ transition: background-color .2s;
+ &:hover {
+ background-color: $b-pool;
+ }
+}
+
+// Tag h1s
+.WxWXp {
+ color: $g7-graphite;
+ font-size: 1.75rem;
+}
+
+// Summary h2s and table headers
+.ioYTqA, .bxcHYI, .hoUoen {
+ color: $g7-graphite;
+}
+
+// h3s
+.espozG {
+ color: $g8-storm;
+}
+
+// Links
+.bnFPhO a { color: $b-dodger;
+ &:visited {color: $b-dodger;}
+}
+
+.redoc-json {
+ font-family: $roboto-mono !important;
+}
+
+// Inline Code
+.flfxUM code,
+.gDsWLk code,
+.kTVySD {
+ font-family: $roboto-mono !important;
+ color: $cp-marguerite;
+ background: $cp-titan;
+ border-color: $cp-titan;
+}
+
+// Required tags
+.jsTAxL {
+ color: $o-curacao;
+}
+
+///////////////////////////// RESPONSE COLOR BLOCKS ////////////////////////////
+
+// Green
+.hLVzSF {
+ background-color: rgba($gr-wasabi, .5);
+ color: $gr-emerald;
+}
+
+// Red
+.byLrBg {
+ background-color: rgba($o-marmelade, .35);
+ color: $o-curacao;
+}
+
+
+
+/////////////////////////////////// LEFT NAV ///////////////////////////////////
+
+// Left nav background
+.gZdDsM {
+ background-color: $g19-ghost;
+}
+
+.gpbcFk:hover, .sc-eTuwsz.active {
+ background-color: rgb(237, 237, 237);
+}
+
+// List item text
+.SmuWE, .gcUzvG, .bbViyS, .sc-hrWEMg label {
+ font-family: $rubik !important;
+}
+
+.fyUykq {
+ font-weight: $medium;
+}
+
+// Request method tags
+.cFwMcp {
+ &.post { background-color: $b-curious; }
+ &.get { background-color: $gr-canopy; }
+ &.put { background-color: $cp-comet; }
+ &.patch { background-color: $ch-keylime; }
+ &.delete { background-color: $o-curacao; }
+}
+
+// Active nav section
+.gcUzvG, .iNzLCk:hover {
+ color: $m-magenta;
+}
+
+/////////////////////////////// RIGHT CODE COLUMN //////////////////////////////
+
+// Right column backgrounds
+.dtUibw, .fLUKgj {
+ background-color: $wp-jagger;
+ h3,h4,h5,h6 {
+ font-family: $rubik !important;
+ font-weight: $medium !important;
+ }
+}
+
+// Code backgrounds
+.irpqyy > .react-tabs__tab-panel {
+ background-color: $wp-telopea;
+}
+.dHLKeu, .fVaxnA {
+ padding-left: 10px;
+ background-color: $wp-telopea;
+}
+
+// Response code tabs
+.irpqyy > ul > li {
+ background-color: $wp-telopea;
+ border-radius: 3px;
+ &.react-tabs__tab--selected{ color: $cp-blueviolet; }
+ &.tab-error { color: $o-fire; }
+ &.tab-success { color: $gr-viridian; }
+}
+
+// Request methods
+.bNYCAJ,
+.jBjYbV,
+.hOczRB,
+.fRsrDc,
+.hPskZd {
+ font-family: $rubik;
+ font-weight: $medium;
+ letter-spacing: .04em;
+ border-radius: 3px;
+}
+.bNYCAJ { background-color: $b-curious; } /* Post */
+.jBjYbV { background-color: $gr-canopy; } /* Get */
+.hOczRB { background-color: $cp-comet; } /* Put */
+.fRsrDc { background-color: $ch-chartreuse; color: $ch-olive; } /* Patch */
+.hPskZd { background-color: $o-curacao; } /* Delete */
+
+// Content type block
+.gzAoUb {
+ background-color: rgba($wp-jagger, .4);
+ font-family: $rubik;
+}
+.iENVAs { font-family: $roboto-mono; }
+.dpMbau { font-family: $rubik; }
+
+// Code controls
+.fCJmC {
+ font-family: $rubik;
+ span { border-radius: 3px; }
+}
+
+// Code blocks
+.kZHJcC { font-family: $roboto-mono; }
+.jCgylq {
+ .token.string {
+ color: $gr-honeydew;
+ & + a { color: $b-malibu; }
+ }
+ .token.boolean { color: #f955b0; }
+}
diff --git a/assets/styles/layouts/_article.scss b/assets/styles/layouts/_article.scss
index 7d36aad37..1c8ef4a49 100644
--- a/assets/styles/layouts/_article.scss
+++ b/assets/styles/layouts/_article.scss
@@ -18,7 +18,8 @@
}
}
h2,h3,h4,h5,h6 {
- & + .highlight pre { margin-top: .5rem; }
+ & + .highlight pre { margin-top: .5rem }
+ & + pre { margin-top: .5rem }
& + .code-tabs-wrapper { margin-top: 0; }
}
h1 {
@@ -61,7 +62,7 @@
p,li {
color: $article-text;
- line-height: 1.6rem;
+ line-height: 1.7rem;
}
p {
@@ -106,10 +107,12 @@
"article/lists",
"article/note",
"article/pagination-btns",
+ "article/related",
"article/scrollbars",
"article/tabbed-content",
"article/tables",
"article/tags",
+ "article/telegraf-plugins",
"article/truncate",
"article/warn";
diff --git a/assets/styles/layouts/_inline-icons.scss b/assets/styles/layouts/_inline-icons.scss
index 0609c0c24..b654d14bd 100644
--- a/assets/styles/layouts/_inline-icons.scss
+++ b/assets/styles/layouts/_inline-icons.scss
@@ -26,22 +26,22 @@
&.ui-toggle {
display: inline-block;
position: relative;
- width: 34px;
- height: 22px;
- background: #1C1C21;
- border: 2px solid #383846;
+ width: 28px;
+ height: 16px;
+ background: $b-pool;
border-radius: .7rem;
- vertical-align: text-bottom;
+ vertical-align: text-top;
+ margin-top: 2px;
.circle {
display: inline-block;
position: absolute;
border-radius: 50%;
- height: 12px;
- width: 12px;
- background: #22ADF6;
- top: 3px;
- right: 3px;
+ height: 8px;
+ width: 8px;
+ background: $g20-white;
+ top: 4px;
+ right: 4px;
}
}
}
diff --git a/assets/styles/layouts/_landing.scss b/assets/styles/layouts/_landing.scss
index 88df3f075..9c5409264 100644
--- a/assets/styles/layouts/_landing.scss
+++ b/assets/styles/layouts/_landing.scss
@@ -1,9 +1,10 @@
.cards {
display: flex;
- justify-content: space-between;
- flex-direction: column;
+ flex-direction: row;
position: relative;
overflow: hidden;
+ border-radius: $radius 0 0 $radius;
+ min-height: 700px;
background: linear-gradient(55deg, $landing-lg-gradient-left, $landing-lg-gradient-right );
a {
@@ -21,62 +22,110 @@
}
}
+ .main {
+ width: 66%;
+ padding: 5rem 2vw 5rem 4.5vw;
+ display: flex;
+ justify-content: center;
+ flex-direction: column;
+
+ text-align: center;
+ z-index: 1;
+ }
+
.group {
display: flex;
-
flex-wrap: wrap;
+ width: 34%;
+ justify-content: flex-end;
}
.card {
- text-align: center;
- z-index: 1;
- &.full {
- width: 100%;
- padding: 5rem 2rem;
- }
+ &.sm {
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ text-align: left;
+ width: 90%;
+ position: relative;
+ margin-bottom: 1px;
+ padding: 2rem 3.5vw 2rem 3vw;
+ min-height: 140px;
+ background: $landing-sm-bg;
+ transition: background-color .4s, width .2s;
+
+ &:last-child{ margin-bottom: 0; }
- &.quarter {
- flex-grow: 2;
- margin: 1px;
- padding: 1.5rem;
- background: rgba($landing-sm-gradient-overlay, .65);
- transition: background-color .4s;
&:hover {
- background: rgba($landing-sm-gradient-overlay, .9);
+ background: $landing-sm-bg-hover;
+ width: 100%;
+ h3 {
+ transform: translateY(-1.2rem);
+ font-weight: $medium;
+ font-size: 1.2rem;
+ }
+ p {
+ opacity: 1;
+ transition-delay: 100ms;
+ }
+ }
+
+ h3 {
+ font-size: 1.1rem;
+ transition: all .2s;
+ }
+ p {
+ position: absolute;
+ width: 80%;
+ color: $g20-white;
+ font-size: .95rem;
+ line-height: 1.25rem;
+ opacity: 0;
+ transition: opacity .2s;
}
}
h1,h2,h3,h4 {
font-weight: 300;
- text-align: center;
color: $g20-white;
}
h1 {
margin: 0 0 1.25rem;
- font-size: 2.25rem;
+ font-size: 2.5rem;
z-index: 1;
}
- h3 { font-size: 1.25rem;}
-
get-started {
- text-align: center;
-
.btn {
display: inline-block;
- padding: .85rem 1.5rem;
- color: $g20-white;
- font-weight: bold;
- background: rgba($g20-white, .25);
- border: 2px solid rgba($g20-white, .5);
- border-radius: $radius;
+ padding: 1.25rem;
+ margin: 0 20% .35rem;
+ color: $landing-btn-text;
+ font-size: 1.1rem;
+ font-weight: $medium;
+ background: $landing-btn-bg;
transition: background-color .2s, color .2s;
+ border-radius: $radius;
+
+ &.oss:after {
+ content: 'alpha';
+ display: inline-block;
+ vertical-align: top;
+ font-style: italic;
+ font-size: .75em;
+ margin-left: .45rem;
+ padding: .1rem .3rem .12rem;
+ border-radius: $radius;
+ border: 1px solid rgba($landing-btn-text, .5);
+ transition: border-color .2s;
+ }
&:hover {
- background: $g20-white;
- color: $b-pool;
+ background: $landing-btn-bg-hover;
+ color: $landing-btn-text-hover;
+ &:after { border-color: rgba($landing-btn-text-hover, .5) }
}
}
}
@@ -97,17 +146,59 @@
}
}
+@media (max-width: 1150px) {
+ .cards {
+ flex-direction: column;
+ .main { width: 100%; }
+ .group {
+ width: 100%;
+ .card.sm {
+ margin-right: 1px;
+ padding: 2rem;
+ flex-grow: 2;
+ width: 49%;
+ text-align: center;
+ background: $landing-sm-bg-alt;
+ h3 {
+ margin: 0 0 .5rem;
+ font-size: 1.1rem;
+ font-weight: $medium;
+ }
+ p {
+ opacity: .6;
+ position: relative;
+ width: auto;
+ margin: 0;
+ }
+ &:hover {
+ background: $landing-sm-bg-hover;
+ h3 { transform: none; }
+ p { opacity: 1; }
+ }
+ }
+ }
+ }
+}
+
@include media(small) {
.cards {
- .group { flex-direction: column; }
- .card{
- &.full { padding: 2.5rem;}
- &.quarter {
+ .group {
+ flex-direction: column;
+ .card.sm {
width: 100%;
max-width: 100%;
padding: 1.25rem;
}
+ }
+ .card{
h1 { font-size: 2rem; }
+ &.main {
+ padding: 2.5rem;
+ get-started .btn {
+ font-size: 1rem;
+ margin: 0 0 .35rem;
+ }
+ }
}
}
}
diff --git a/assets/styles/layouts/article/_code.scss b/assets/styles/layouts/article/_code.scss
index 259b81263..85a88cc2b 100644
--- a/assets/styles/layouts/article/_code.scss
+++ b/assets/styles/layouts/article/_code.scss
@@ -8,7 +8,7 @@ code,pre {
p,li,table,h2,h3,h4,h5,h6 {
code {
- padding: .15rem .45rem .25rem;
+ padding: .1rem .4rem .2rem;
border-radius: $radius;
color: $article-code;
white-space: nowrap;
@@ -54,7 +54,9 @@ pre {
overflow-y: hidden;
code {
padding: 0;
- line-height: 1.4rem;
+ font-size: .95rem;
+ line-height: 1.5rem;
+ white-space: pre;
}
}
diff --git a/assets/styles/layouts/article/_related.scss b/assets/styles/layouts/article/_related.scss
new file mode 100644
index 000000000..fa8d8e14c
--- /dev/null
+++ b/assets/styles/layouts/article/_related.scss
@@ -0,0 +1,15 @@
+.related {
+ border-top: 1px solid $article-hr;
+ padding-top: 1.5rem;
+
+ h4 { font-size: 1.15rem; }
+ ul {
+ list-style: none;
+ padding: 0;
+ margin-top: 0;
+ }
+ li {
+ margin: .5rem 0;
+ line-height: 1.25rem;
+ }
+}
diff --git a/assets/styles/layouts/article/_tags.scss b/assets/styles/layouts/article/_tags.scss
index 8c22756d8..f56f731d4 100644
--- a/assets/styles/layouts/article/_tags.scss
+++ b/assets/styles/layouts/article/_tags.scss
@@ -2,8 +2,8 @@
.tags {
border-top: 1px solid $article-hr;
- padding-top: 1.5rem;
- margin-top: 2rem;
+ padding-top: 1.75rem;
+ margin: 2rem 0 1rem;
.tag {
background: $body-bg;
@@ -15,3 +15,9 @@
font-size: .8rem;
}
}
+
+.related + .tags {
+ border: none;
+ padding-top: 0;
+ margin: 1.5rem 0 1rem;
+}
diff --git a/assets/styles/layouts/article/_telegraf-plugins.scss b/assets/styles/layouts/article/_telegraf-plugins.scss
new file mode 100644
index 000000000..683d7382b
--- /dev/null
+++ b/assets/styles/layouts/article/_telegraf-plugins.scss
@@ -0,0 +1,217 @@
+/////////////////////// Styles for Telegraf plugin cards ///////////////////////
+
+.plugin-card {
+ position: relative;
+ padding: 1rem 1.5rem;
+ margin-bottom: .5rem;
+ justify-content: center;
+ align-items: center;
+ background: rgba($body-bg, .4);
+ border-radius: $radius;
+
+ h3 {
+ padding: 0;
+ margin-top: .25rem;
+ }
+
+ &.new h3:after {
+ content: "New";
+ margin-left: .3rem;
+ padding: .25rem .5rem;
+ font-style: italic;
+ color: $nav-active;
+ font-size: 1.2rem;
+ }
+
+ p {
+ &.meta {
+ margin: .75rem 0;
+ font-weight: $medium;
+ line-height: 1.75rem;
+
+ .deprecated {
+ margin-left: .5rem;
+ font-style: italic;
+ color: $article-code-accent7;
+ }
+ }
+ }
+
+ & .info {
+ & > p:last-child { margin-bottom: .5rem; }
+ & > ul:last-child { margin-bottom: .5rem; }
+ & > ol:last-child { margin-bottom: .5rem; }
+ }
+
+ .github-link {
+ position: absolute;
+ top: 0;
+ right: 0.5rem;
+ opacity: 0;
+ transition: opacity .2s, background .2s, color 2s;
+
+ .icon-github {
+ font-size: 1.2rem;
+ margin: 0 .25rem 0 0;
+ }
+ }
+
+ &:hover {
+ .github-link { opacity: 1; }
+ }
+
+ // Special use-case for using block quotes in the yaml provided by the data file
+ blockquote {
+ border-color: $article-note-base;
+ background: rgba($article-note-base, .12);
+ h3,h4,h5,h6 { color: $article-note-heading; }
+ p, li {
+ color: $article-note-text;
+ font-size: 1rem;
+ font-style: normal;
+ }
+ strong { color: inherit; }
+ a {
+ color: $article-note-link;
+ code:after {
+ border-color: transparent rgba($article-note-code, .35) transparent transparent;
+ }
+ &:hover {
+ color: $article-note-link-hover;
+ code:after {
+ border-color: transparent $article-note-link-hover transparent transparent;
+ }
+ }
+ }
+ ol li:before { color: $article-note-text; }
+ code, pre{
+ color: $article-note-code;
+ background: $article-note-code-bg;
+ }
+ }
+}
+
+//////////////////////////////// Plugin Filters ////////////////////////////////
+
+#plugin-filters {
+ display: flex;
+ flex-flow: row wrap;
+ align-items: flex-start;
+
+ .filter-category {
+ flex: 1 1 200px;
+ margin: 0 1.25rem 1.25rem 0;
+ max-width: 33%;
+
+ &.two-columns {
+ flex: 1 2 400px;
+ max-width: 66%;
+ .filter-list {
+ columns: 2;
+ }
+ }
+ }
+
+ h5 {
+ border-bottom: 1px solid rgba($article-text, .25);
+ padding-bottom: .65rem;
+ }
+
+ .filter-list {
+ padding: 0;
+ margin: .5rem 0 0;
+ list-style: none;
+ li {
+ margin: 0;
+ line-height: 1.35rem;
+ }
+ }
+
+ label {
+ display: block;
+ padding: .25rem 0;
+ color: $article-text;
+ position: relative;
+
+ &:after {
+ content: attr(data-count);
+ margin-left: .25rem;
+ font-size: .85rem;
+ opacity: .5;
+ }
+ }
+
+ .checkbox {
+ display: inline-block;
+ height: 1.15em;
+ width: 1.15em;
+ background: rgba($article-text, .05);
+ margin-right: .3rem;
+ vertical-align: text-top;
+ border-radius: $radius;
+ cursor: pointer;
+ border: 1.5px solid rgba($article-text, .2);
+ user-select: none;
+ }
+
+ input[type='checkbox'] {
+ margin-right: -1.1rem ;
+ padding: 0;
+ vertical-align: top;
+ opacity: 0;
+ cursor: pointer;
+
+ & + .checkbox:after {
+ content: "";
+ display: block;
+ position: absolute;
+ height: .5rem;
+ width: .5rem;
+ border-radius: 50%;
+ background: $article-link;
+ top: .65rem;
+ left: .35rem;
+ opacity: 0;
+ transform: scale(2);
+ transition: all .2s;
+ }
+
+ &:checked + .checkbox:after {
+ opacity: 1;
+ transform: scale(1);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////// MEDIA QUERIES ////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+@media(max-width: 1100px) {
+ #plugin-filters {
+ .filter-category {
+ max-width: 50%;
+ &.two-columns, &.three-columns {
+ max-width: 100%;
+ }
+ }
+ }
+}
+
+@include media(small) {
+ #plugin-filters{
+ .filter-category {
+ max-width: 100%;
+ }
+ }
+
+ .plugin-card {
+ .github-link {
+ opacity: 1;
+ padding: .25rem .35rem .35rem;
+ line-height: 0;
+ .icon-github { margin: 0; }
+ .hide { display: none; }
+ }
+ }
+}
diff --git a/assets/styles/styles-api.scss b/assets/styles/styles-api.scss
new file mode 100644
index 000000000..df8888069
--- /dev/null
+++ b/assets/styles/styles-api.scss
@@ -0,0 +1,4 @@
+// InfluxData API Docs style overrides
+// These override styles generated by ReDoc
+
+@import "layouts/api-overrides";
diff --git a/assets/styles/styles-default.scss b/assets/styles/styles-default.scss
index d22c47221..ac37b1ae0 100644
--- a/assets/styles/styles-default.scss
+++ b/assets/styles/styles-default.scss
@@ -1,9 +1,10 @@
// InfluxData Docs Default Theme (Light)
// Import Tools
-@import "tools/icomoon";
-@import "tools/media-queries.scss";
-@import "tools/mixins.scss";
+@import "tools/icomoon",
+ "tools/media-queries.scss",
+ "tools/mixins.scss",
+ "tools/tooltips";
// Import default light theme
@import "themes/theme-light.scss";
diff --git a/assets/styles/themes/_theme-dark.scss b/assets/styles/themes/_theme-dark.scss
index 20144e52e..60f2ba14f 100644
--- a/assets/styles/themes/_theme-dark.scss
+++ b/assets/styles/themes/_theme-dark.scss
@@ -86,7 +86,7 @@ $article-note-table-row-alt: #3B2862;
$article-note-table-scrollbar: $np-deepnight;
$article-note-shadow: $np-deepnight;
$article-note-code: $cp-comet;
-$article-note-code-bg: $wp-telopea;
+$article-note-code-bg: $wp-jaguar;
$article-note-code-accent1: #567375;
$article-note-code-accent2: $b-pool;
$article-note-code-accent3: $gr-viridian;
@@ -168,5 +168,17 @@ $error-page-btn-hover-text: $b-dodger;
// Landing Page colors
$landing-lg-gradient-left: $wp-violentdark;
$landing-lg-gradient-right: $cp-minsk;
-$landing-sm-gradient-overlay: $b-dodger;
+$landing-sm-bg: $cp-victoria;
+$landing-sm-bg-alt: $cp-victoria;
+$landing-sm-bg-hover: $b-dodger;
+$landing-btn-text: $g20-white;
+$landing-btn-bg: $b-dodger;
+$landing-btn-text-hover: $b-dodger;
+$landing-btn-bg-hover: $g20-white;
$landing-artwork-color: $cp-minsk;
+
+// Tooltip colors
+$tooltip-color: $ch-chartreuse;
+$tooltip-color-alt: $ch-canary;
+$tooltip-bg: $g20-white;
+$tooltip-text: $cp-minsk;
diff --git a/assets/styles/themes/_theme-light.scss b/assets/styles/themes/_theme-light.scss
index c8aec011f..b89511d8d 100644
--- a/assets/styles/themes/_theme-light.scss
+++ b/assets/styles/themes/_theme-light.scss
@@ -46,7 +46,7 @@ $nav-active: $m-magenta !default;
// Article Content
$article-bg: $g20-white !default;
-$article-heading: $cp-purple !default;
+$article-heading: $cp-marguerite !default;
$article-heading-alt: $g7-graphite !default;
$article-text: $g8-storm !default;
$article-bold: $g8-storm !default;
@@ -167,7 +167,19 @@ $error-page-btn-hover: $b-pool !default;
$error-page-btn-hover-text: $g20-white !default;
// Landing Page colors
-$landing-lg-gradient-left: $cp-marguerite !default;
-$landing-lg-gradient-right: $b-pool !default;
-$landing-sm-gradient-overlay: $cp-blueviolet !default;
+$landing-lg-gradient-left: $cp-jakarta !default;
+$landing-lg-gradient-right: $wp-heart !default;
+$landing-sm-bg: $wp-seance !default;
+$landing-sm-bg-alt: $wp-jagger !default;
+$landing-sm-bg-hover: $b-dodger !default;
+$landing-btn-text: $g20-white !default;
+$landing-btn-bg: $b-dodger !default;
+$landing-btn-text-hover: $b-dodger !default;
+$landing-btn-bg-hover: $g20-white !default;
$landing-artwork-color: rgba($g20-white, .15) !default;
+
+// Tooltip colors
+$tooltip-color: $m-magenta !default;
+$tooltip-color-alt: $wp-trance !default;
+$tooltip-bg: $m-lavander !default;
+$tooltip-text: $g20-white !default;
diff --git a/assets/styles/tools/_color-palette.scss b/assets/styles/tools/_color-palette.scss
index 7004a1aa2..146282cfa 100644
--- a/assets/styles/tools/_color-palette.scss
+++ b/assets/styles/tools/_color-palette.scss
@@ -24,6 +24,7 @@ $g19-ghost: #FAFAFC;
$g20-white: #FFFFFF; // Brand color
// Warm Purples - Magentas
+$wp-jaguar: #1d0135;
$wp-telopea: #23043E;
$wp-violentdark: #2d0749;
$wp-violet: #32094E;
diff --git a/assets/styles/tools/_icomoon.scss b/assets/styles/tools/_icomoon.scss
index acf227bd4..32f4e000f 100644
--- a/assets/styles/tools/_icomoon.scss
+++ b/assets/styles/tools/_icomoon.scss
@@ -1,10 +1,10 @@
@font-face {
font-family: 'icomoon';
- src: url('fonts/icomoon.eot?972u0y');
- src: url('fonts/icomoon.eot?972u0y#iefix') format('embedded-opentype'),
- url('fonts/icomoon.ttf?972u0y') format('truetype'),
- url('fonts/icomoon.woff?972u0y') format('woff'),
- url('fonts/icomoon.svg?972u0y#icomoon') format('svg');
+ src: url('fonts/icomoon.eot?9r9zke');
+ src: url('fonts/icomoon.eot?9r9zke#iefix') format('embedded-opentype'),
+ url('fonts/icomoon.ttf?9r9zke') format('truetype'),
+ url('fonts/icomoon.woff?9r9zke') format('woff'),
+ url('fonts/icomoon.svg?9r9zke#icomoon') format('svg');
font-weight: normal;
font-style: normal;
}
@@ -24,9 +24,24 @@
-moz-osx-font-smoothing: grayscale;
}
+.icon-ui-disks-nav:before {
+ content: "\e93c";
+}
+.icon-ui-wrench-nav:before {
+ content: "\e93d";
+}
+.icon-ui-eye-closed:before {
+ content: "\e956";
+}
+.icon-ui-eye-open:before {
+ content: "\e957";
+}
.icon-ui-chat:before {
content: "\e93a";
}
+.icon-ui-bell:before {
+ content: "\e93b";
+}
.icon-ui-cloud:before {
content: "\e93f";
}
@@ -216,6 +231,9 @@
.icon-loop2:before {
content: "\ea2e";
}
+.icon-github:before {
+ content: "\eab0";
+}
.icon-tux:before {
content: "\eabd";
}
diff --git a/assets/styles/tools/_tooltips.scss b/assets/styles/tools/_tooltips.scss
new file mode 100644
index 000000000..72673ff6a
--- /dev/null
+++ b/assets/styles/tools/_tooltips.scss
@@ -0,0 +1,91 @@
+@import "themes/theme-light.scss";
+
+// Font weights
+$medium: 500;
+$bold: 700;
+
+// Border radius
+$radius: 3px;
+
+////////////////////////////////// Tool Tips //////////////////////////////////
+
+.tooltip {
+ position: relative;
+ display: inline;
+ font-weight: $medium;
+ color: $tooltip-color;
+
+ &:hover {
+ .tooltip-container { visibility: visible; }
+ .tooltip-text {
+ opacity: 1;
+ transform: translate(-50%,-2.5rem);
+ }
+ }
+
+ .tooltip-container {
+ position: absolute;
+ top: 0;
+ left: 50%;
+ transform: translateX(-50%);
+ overflow: visible;
+ visibility: hidden;
+ }
+
+ .tooltip-text {
+ font-weight: $medium;
+ position: absolute;
+ border-radius: $radius;
+ padding: .15rem .75rem;
+ font-size: 0.9rem;
+ line-height: 1.75rem;
+ left: 50%;
+ transform: translate(-50%,-1.75rem);
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ opacity: 0;
+ color: $tooltip-text;
+ background-color: $tooltip-bg;
+
+ &:after {
+ content: '';
+ position: absolute;
+ left: 50%;
+ bottom: -14px;
+ transform: translateX(-50%);
+ border-top: 8px solid $tooltip-bg;
+ border-right: 8px solid transparent;
+ border-bottom: 8px solid transparent;
+ border-left: 8px solid transparent;
+ }
+ }
+}
+
+th .tooltip {
+ color: $tooltip-color-alt;
+
+ &:hover {
+ .tooltip-container { visibility: visible; }
+ .tooltip-text {
+ opacity: 1;
+ transform: translate(-50%,1.75rem);
+ }
+ }
+
+ .tooltip-text {
+ transform: translate(-50%,1rem);
+
+ &:after {
+ content: '';
+ position: absolute;
+ height: 0;
+ left: 50%;
+ top: -14px;
+ transform: translateX(-50%);
+ border-top: 8px solid transparent;
+ border-right: 8px solid transparent;
+ border-bottom: 8px solid $tooltip-bg;
+ border-left: 8px solid transparent;
+ }
+ }
+}
diff --git a/content/v2.0/_index.md b/content/v2.0/_index.md
index 493c029d2..a45ca5313 100644
--- a/content/v2.0/_index.md
+++ b/content/v2.0/_index.md
@@ -10,8 +10,8 @@ menu:
#### Welcome
Welcome to the InfluxDB v2.0 documentation!
-InfluxDB is an open source time series database designed to handle high write and query loads.
+InfluxDB is an open source time series database designed to handle high write and query workloads.
This documentation is meant to help you learn how to use and leverage InfluxDB to meet your needs.
-Common use cases include infrastructure monitoring, IoT data collection, events handling and more.
+Common use cases include infrastructure monitoring, IoT data collection, events handling, and more.
If your use case involves time series data, InfluxDB is purpose-built to handle it.
diff --git a/content/v2.0/cloud/about/_index.md b/content/v2.0/cloud/about/_index.md
deleted file mode 100644
index 523c5d176..000000000
--- a/content/v2.0/cloud/about/_index.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: About InfluxDB Cloud 2.0
-description: Important information about InfluxDB Cloud 2.0 including release notes and known issues.
-weight: 10
-menu:
- v2_0_cloud:
- name: About InfluxDB Cloud
----
-
-Important information about InfluxDB Cloud 2.0 including known issues and release notes.
-
-{{< children >}}
diff --git a/content/v2.0/cloud/about/known-issues.md b/content/v2.0/cloud/about/known-issues.md
deleted file mode 100644
index 234ab8324..000000000
--- a/content/v2.0/cloud/about/known-issues.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Known issues in InfluxDB Cloud
-description: Information related to known issues in InfluxDB Cloud 2.
-weight: 102
-menu:
- v2_0_cloud:
- name: Known issues
- parent: About InfluxDB Cloud
----
-
-The following issues currently exist in {{< cloud-name >}}:
-
-- IDPE 2868: Users can delete a token with an active Telegraf configuration pointed to it.
-- [TELEGRAF-5600](https://github.com/influxdata/telegraf/issues/5600): Improve error message in Telegraf when the bucket it's reporting to is not found.
-- [INFLUXDB-12687](https://github.com/influxdata/influxdb/issues/12687): Create organization button should only be displayed for users with permissions to create an organization.
diff --git a/content/v2.0/cloud/account-management/_index.md b/content/v2.0/cloud/account-management/_index.md
new file mode 100644
index 000000000..2656f9d89
--- /dev/null
+++ b/content/v2.0/cloud/account-management/_index.md
@@ -0,0 +1,12 @@
+---
+title: Manage your InfluxDB Cloud 2.0 Account
+description: >
+ View and manage information related to your InfluxDB Cloud 2.0 account such as
+ pricing plans, data usage, account cancelation, etc.
+weight: 3
+menu:
+ v2_0_cloud:
+ name: Account management
+---
+
+{{< children >}}
diff --git a/content/v2.0/cloud/account-management/billing.md b/content/v2.0/cloud/account-management/billing.md
new file mode 100644
index 000000000..de4e41ff4
--- /dev/null
+++ b/content/v2.0/cloud/account-management/billing.md
@@ -0,0 +1,99 @@
+---
+title: Add payment method and view billing
+list_title: Add payment and view billing
+description: >
+ Add your InfluxDB Cloud payment method and view billing information.
+weight: 103
+menu:
+ v2_0_cloud:
+ parent: Account management
+ name: Add payment and view billing
+---
+
+- Hover over the **Usage** icon in the left navigation bar and select **Billing**.
+
+ {{< nav-icon "cloud" >}}
+
+ Complete the following procedures as needed:
+
+ - [Add or update your {{< cloud-name >}} payment method](#add-or-update-your-influxdb-cloud-2-0-payment-method)
+ - [Add or update your contact information](#add-or-update-your-contact-information)
+ - [Send notifications when usage exceeds an amount](#send-notifications-when-usage-exceeds-an-amount)
+
+ View information about:
+
+ - [Pay As You Go billing](#view-pay-as-you-go-billing-information)
+ - [Free plan](#view-free-plan-information)
+ - [Exceeded rate limits](#exceeded-rate-limits)
+ - [Billing cycle](#billing-cycle)
+ - [Declined or late payments](#declined-or-late-payments)
+
+### Add or update your InfluxDB Cloud 2.0 payment method
+
+1. On the Billing page:
+  - To update, click the **Change Payment** button.
+ - In the **Payment Method** section:
+ - Enter your cardholder name and number
+ - Select your expiration month and year
+ - Enter your CVV code and select your card type
+ - Enter your card billing address
+
+2. Click **Add Card**.
+
+### Add or update your contact information
+
+1. On the Billing page:
+ - To update, click the **Edit Information** button.
+ - In the **Contact Information** section, enter your name, company, and address.
+2. Click **Save Contact Info**.
+
+### Send notifications when usage exceeds an amount
+
+1. On the Billing page, click **Notification Settings**.
+2. Select the **Send email notification** toggle, and then enter the email address to notify.
+3. Enter the dollar amount to trigger a notification email. By default, an email is triggered when the amount exceeds $10. (Whole dollar amounts only. For example, $10.50 is not a supported amount.)
+
+### View Pay As You Go billing information
+
+- On the Billing page, view your billing information, including:
+ - Account balance
+ - Last billing update (updated hourly)
+ - Past invoices
+ - Payment method
+ - Contact information
+ - Notification settings
+
+### View Free plan information
+
+- On the Billing page, view the total limits available for the Free plan.
+
+### Exceeded rate limits
+
+If you exceed your plan's [rate limits](/v2.0/cloud/pricing-plans/), {{< cloud-name >}} provides a notification in the {{< cloud-name "short" >}} user interface (UI) and adds a rate limit event to your **Usage** page for review.
+
+All rate-limited requests are rejected, including both read and write requests.
+_Rate-limited requests are **not** queued._
+
+_To remove rate limits, [upgrade to a Pay As You Go Plan](/v2.0/cloud/account-management/upgrade-to-payg/)._
+
+#### Rate-limited HTTP response code
+
+When a request exceeds your plan's rate limit, the InfluxDB API returns the following response:
+
+```
+HTTP 429 “Too Many Requests”
+Retry-After: xxx (seconds to wait before retrying the request)
+```
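+
+For example (a minimal sketch with placeholder organization, bucket, and token values),
+you can inspect the status code and `Retry-After` header on a write request with `curl -i`:
+
+```sh
+# Write one point of line protocol and print the response headers.
+# A "429 Too Many Requests" status with a Retry-After header means the request was rate limited.
+curl -i "https://us-west-2-1.aws.cloud2.influxdata.com/api/v2/write?org=example-org&bucket=example-bucket&precision=s" \
+  --header "Authorization: Token EXAMPLE_TOKEN" \
+  --data-binary "mem,host=host1 used_percent=23.4 $(date +%s)"
+```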
+
+### Billing cycle
+
+Billing occurs on the first day of the month for the previous month. For example, if you start the Pay As You Go plan on September 15, you're billed on October 1 for your usage from September 15-30.
+
+### Declined or late payments
+
+| Timeline | Action |
+|:----------------------------|:------------------------------------------------------------------------------------------------------------------------|
+| **Initial declined payment**| We'll retry the charge every 72 hours. During this period, update your payment method to successfully process your payment. |
+| **One week later** | Account disabled except data writes. Update your payment method to successfully process your payment and enable your account. |
+| **10-14 days later** | Account completely disabled. During this period, you must contact us at support@influxdata.com to process your payment and enable your account. |
+| **21 days later** | Account suspended. Contact support@influxdata.com to settle your final bill and retrieve a copy of your data or access to InfluxDB Cloud dashboards, tasks, Telegraf configurations, and so on.|
diff --git a/content/v2.0/cloud/account-management/data-usage.md b/content/v2.0/cloud/account-management/data-usage.md
new file mode 100644
index 000000000..eb5a8c1e5
--- /dev/null
+++ b/content/v2.0/cloud/account-management/data-usage.md
@@ -0,0 +1,49 @@
+---
+title: View InfluxDB Cloud data usage
+list_title: View data usage
+description: >
+ View your InfluxDB Cloud 2.0 data usage and rate limit notifications.
+weight: 103
+menu:
+ v2_0_cloud:
+ parent: Account management
+ name: View data usage
+---
+
+To view your {{< cloud-name >}} data usage, hover over the **Usage** icon in the
+left navigation bar and select **Usage**.
+
+{{< nav-icon "usage" >}}
+
+The usage page provides data usage information for the time frame specified in the
+drop-down at the top of the Usage page.
+
+- **Writes:** Total data in MB written to your {{< cloud-name "short" >}} instance.
+- **Reads:** Total data in MB sent as responses to queries from your {{< cloud-name "short" >}} instance.
+- **Query Duration:** Total time spent processing queries in seconds.
+- **Storage Usage:** Total disk usage in gigabytes.
+- **API Request Count:** The total number of query and write API requests received
+ during the specified time frame.
+- **Usage over the specified time period:** A line graph that visualizes usage over the specified time period.
+- **Rate Limits over the specified time period:** A list of rate limit events over
+ the specified time period.
+
+{{< img-hd src="/img/2-0-cloud-usage.png" />}}
+
+## Exceeded rate limits
+If you exceed your plan's [rate limits](/v2.0/cloud/pricing-plans/), {{< cloud-name >}}
+will provide a notification in the {{< cloud-name "short" >}} user interface (UI)
+and add a rate limit event to your **Usage** page for review.
+
+All rate-limited requests are rejected, including both read and write requests.
+_Rate-limited requests are **not** queued._
+
+_To remove rate limits, [upgrade to a Pay As You Go Plan](/v2.0/cloud/account-management/upgrade-to-payg/)._
+
+### Rate-limited HTTP response code
+When a request exceeds your plan's rate limit, the InfluxDB API returns the following response:
+
+```
+HTTP 429 “Too Many Requests”
+Retry-After: xxx (seconds to wait before retrying the request)
+```
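+
+If you script against the API, you can honor the `Retry-After` header before retrying.
+The following is a rough sketch only, using placeholder host, organization, bucket, and token values:
+
+```sh
+# Retry a rate-limited write after the number of seconds reported in Retry-After.
+status=429
+while [ "$status" = "429" ]; do
+  headers=$(curl -s -D - -o /dev/null \
+    "https://us-west-2-1.aws.cloud2.influxdata.com/api/v2/write?org=example-org&bucket=example-bucket" \
+    --header "Authorization: Token EXAMPLE_TOKEN" \
+    --data-binary "mem,host=host1 used_percent=23.4")
+  status=$(printf '%s' "$headers" | head -n 1 | awk '{print $2}' | tr -d '\r')
+  retry=$(printf '%s' "$headers" | grep -i '^Retry-After:' | awk '{print $2}' | tr -d '\r')
+  [ "$status" = "429" ] && sleep "${retry:-30}"
+done
+```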
diff --git a/content/v2.0/cloud/account-management/offboarding.md b/content/v2.0/cloud/account-management/offboarding.md
new file mode 100644
index 000000000..6dbe5a601
--- /dev/null
+++ b/content/v2.0/cloud/account-management/offboarding.md
@@ -0,0 +1,63 @@
+---
+title: Cancel your InfluxDB Cloud subscription
+description: >
+ Cancel your InfluxDB Cloud 2.0 account at any time by stopping all read and write
+ requests, backing up data, and contacting InfluxData Support.
+weight: 104
+menu:
+ v2_0_cloud:
+ parent: Account management
+ name: Cancel InfluxDB Cloud
+---
+
+To cancel your {{< cloud-name >}} subscription, complete the following steps:
+
+1. [Stop reading and writing data](#stop-reading-and-writing-data).
+2. [Export data and other artifacts](#export-data-and-other-artifacts).
+3. [Cancel service](#cancel-service).
+
+### Stop reading and writing data
+
+To stop being charged for {{< cloud-name "short" >}}, pause all writes and queries.
+
+### Export data and other artifacts
+
+To export data and artifacts, follow the steps below.
+
+{{% note %}}
+Exported data and artifacts can be used in an InfluxDB OSS instance.
+{{% /note %}}
+
+#### Export tasks
+
+For details, see [Export a task](/v2.0/process-data/manage-tasks/export-task/).
+
+#### Export dashboards
+
+For details, see [Export a dashboard](/v2.0/visualize-data/dashboards/export-dashboard/).
+
+#### Telegraf configurations
+
+**To save a Telegraf configuration:**
+
+1. Click the **Settings** icon in the navigation bar.
+
+ {{< nav-icon "settings" >}}
+
+2. Select the **Telegraf** tab. A list of existing Telegraf configurations appears.
+3. Click on the name of a Telegraf configuration.
+4. Click **Download Config** to save.
+
+#### Data backups
+
+To request a backup of data in your {{< cloud-name "short" >}} instance, contact [InfluxData Support](mailto:support@influxdata.com).
+
+### Cancel service
+
+1. Hover over the **Usage** icon in the left navigation bar and select **Billing**.
+
+ {{< nav-icon "usage" >}}
+
+2. Click **Cancel Service**.
+3. Select **I understand and agree to these conditions**, and then click **I understand, Cancel Service.**
+4. Click **Confirm and Cancel Service**. Your payment method is charged your final balance immediately upon cancellation of service.
diff --git a/content/v2.0/cloud/account-management/upgrade-to-payg.md b/content/v2.0/cloud/account-management/upgrade-to-payg.md
new file mode 100644
index 000000000..0d21a880c
--- /dev/null
+++ b/content/v2.0/cloud/account-management/upgrade-to-payg.md
@@ -0,0 +1,30 @@
+---
+title: Upgrade to a Pay As You Go Plan
+description: >
+ Upgrade to a Pay As You Go Plan to remove rate limits from your InfluxDB Cloud 2.0 account.
+weight: 102
+menu:
+ v2_0_cloud:
+ parent: Account management
+ name: Upgrade to Pay As You Go
+---
+
+To upgrade to a Pay As You Go Plan:
+
+1. Hover over the **Usage** icon in the left navigation bar and select **Billing**.
+
+ {{< nav-icon "usage" >}}
+
+2. Click **Upgrade to Pay As You Go**.
+3. Review the terms and pricing associated with the Pay As You Go Plan.
+4. Click **Sounds Good To Me**.
+5. Enter your contact information.
+ Traditionally this would be "shipping" information, but InfluxData does not ship anything.
+ This information should be the primary location where the service is consumed.
+ All service updates, security notifications, and other important information are
+ sent using the information you provide.
+ The address is used to determine any applicable sales tax.
+6. Enter your payment information and click **Add Card**.
+7. Review the plan details, contact information, and credit card information.
+8. Click **Confirm & Order**.
diff --git a/content/v2.0/cloud/get-started.md b/content/v2.0/cloud/get-started.md
index d8d2d25a4..7a1e9fb23 100644
--- a/content/v2.0/cloud/get-started.md
+++ b/content/v2.0/cloud/get-started.md
@@ -1,93 +1,142 @@
---
-title: Get started with InfluxDB Cloud 2.0 Beta
+title: Get started with InfluxDB Cloud 2.0
description: >
- Sign up for and get started with InfluxDB Cloud 2.0 Beta.
+ Sign up now, sign in, and get started exploring and using the InfluxDB Cloud 2.0 time series platform.
weight: 1
menu:
v2_0_cloud:
name: Get started with InfluxDB Cloud
---
-{{< cloud-name >}} is a fully managed and hosted version of the InfluxDB 2.0.
-To get started, complete the tasks below.
-{{% cloud-msg %}}
-InfluxDB v2.0 alpha documentation applies to {{< cloud-name "short" >}} unless otherwise specified.
-{{% /cloud-msg %}}
+{{< cloud-name >}} is a fully managed, hosted, multi-tenanted version of the
+InfluxDB 2.0 time series data platform.
+The core of {{< cloud-name "short" >}} is built on the foundation of the open source
+version of InfluxDB 2.0, which is much more than a database.
+It is a time series data platform that collects, stores, processes, and visualizes metrics and events.
+
+_See the differences between {{< cloud-name "short">}} and InfluxDB OSS
+[below](#differences-between-influxdb-cloud-and-influxdb-oss)._
+
+## Start for free
+Start using {{< cloud-name >}} at no cost with the [Free Plan](/v2.0/cloud/pricing-plans/#free-plan).
+Use it as much and as long as you like within the plan's rate-limits.
+Limits are designed to let you monitor 5-10 sensors, stacks, or servers comfortably.
+Once you're ready to grow, [upgrade to the Pay As You Go Plan](/v2.0/cloud/account-management/upgrade-to-payg/).
## Sign up
-1. Go to [InfluxDB Cloud 2.0]({{< cloud-link >}}), enter your email and password,
- and then click **Sign Up**.
+1. Go to [InfluxDB Cloud 2.0]({{< cloud-link >}}), enter your email address and password,
+ and click **Sign Up**.
+2. InfluxDB Cloud requires email verification to complete the sign up process.
+ Verify your email address by opening the email sent to the address you provided
+ and clicking **Verify Your Email**.
+3. Select a region for your {{< cloud-name >}} instance.
+ Currently, {{< cloud-name >}} AWS - US West (Oregon) is the only region available.
+ _To suggest regions to add, click **Let us know** under Regions._
+4. Review the terms of the agreement, and then select
+ **I have viewed and agree to InfluxDB Cloud 2.0 Services Subscription Agreement
+ and InfluxData Global Data Processing Agreement.**.
-2. Open email from cloudbeta@influxdata.com (subject: Please verify your email for InfluxDB Cloud),
- and then click **Verify Your Email**. The Welcome to InfluxDB Cloud 2.0 page is displayed.
+ For details on the agreements, see the [InfluxDB Cloud 2.0: Services Subscription Agreement](https://www.influxdata.com/legal/terms-of-use/)
+ and the [InfluxData Global Data Processing Agreement](https://www.influxdata.com/legal/influxdata-global-data-processing-agreement/).
-3. Currently, {{< cloud-name >}} us-west-2 region is the only region available.
- To suggest regions to add, click the **Let us know** link under Regions.
+5. Click **Continue**. {{< cloud-name >}} opens with a default organization
+ and bucket (both created from your email address).
-4. Review the terms of the beta agreement, and then select
- **I viewed and agree to InfluxDB Cloud 2.0 Beta Agreement**.
+ _To update organization and bucket names, see [Update an organization](/v2.0/organizations/update-org/)
+ and [Update a bucket](/v2.0/organizations/buckets/update-bucket/#update-a-bucket-s-name-in-the-influxdb-ui)._
-5. Click **Continue**. InfluxDB Cloud 2.0 opens with a default organization
- (created from your email) and bucket (created from your email local-part).
+{{% cloud-msg %}}
+All InfluxDB 2.0 documentation applies to {{< cloud-name "short" >}} unless otherwise specified.
+References to the InfluxDB user interface (UI) or localhost:9999 refer to your
+{{< cloud-name >}} UI.
+{{% /cloud-msg %}}
-## Log in
-Log in to [InfluxDB Cloud 2.0](https://us-west-2-1.aws.cloud2.influxdata.com) using the credentials created above.
+## Sign in
+
+Sign in to [InfluxDB Cloud 2.0](https://cloud2.influxdata.com) using your email address and password.
+
+Sign in to InfluxDB Cloud 2.0 now
## Collect and write data
-Collect and write data to InfluxDB using Telegraf, the InfluxDB v2 API, `influx`
-command line interface (CLI), the InfluxDB user interface (UI), or client libraries.
+
+Collect and write data to InfluxDB using the Telegraf plugins, the InfluxDB v2 API, the `influx`
+command line interface (CLI), the InfluxDB UI (the user interface for InfluxDB 2.0), or the InfluxDB v2 API client libraries.
### Use Telegraf
+
Use Telegraf to quickly write data to {{< cloud-name >}}.
-Create new Telegraf configurations automatically in the UI or manually update an
+Create new Telegraf configurations automatically in the InfluxDB UI, or manually update an
existing Telegraf configuration to send data to your {{< cloud-name "short" >}} instance.
For details, see [Automatically configure Telegraf](/v2.0/write-data/use-telegraf/auto-config/#create-a-telegraf-configuration)
and [Manually update Telegraf configurations](/v2.0/write-data/use-telegraf/manual-config/).
### API, CLI, and client libraries
-For information about using the InfluxDB API, CLI, and client libraries to write data,
+
+For information about using the InfluxDB v2 API, `influx` CLI, and client libraries to write data,
see [Write data to InfluxDB](/v2.0/write-data/).
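+
+As a quick sketch of the CLI path (the host, organization, bucket, and token below are
+placeholders; substitute your own {{< cloud-name "short" >}} values):
+
+```sh
+# Write a single point of line protocol with the influx CLI
+influx write \
+  --host https://us-west-2-1.aws.cloud2.influxdata.com \
+  --token EXAMPLE_TOKEN \
+  --org example-org \
+  --bucket example-bucket \
+  --precision s \
+  "mem,host=host1 used_percent=23.4 $(date +%s)"
+```
+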
{{% note %}}
+
#### InfluxDB Cloud instance endpoint
-When using Telegraf, the API, CLI, or client libraries to interact with your {{< cloud-name "short" >}}
+
+When using Telegraf, the InfluxDB v2 API, the `influx` CLI, or the client libraries to interact with your {{< cloud-name "short" >}}
instance, extract the "host" or "endpoint" of your instance from your {{< cloud-name "short" >}} UI URL.
For example:
```
https://us-west-2-1.aws.cloud2.influxdata.com
```
+
{{% /note %}}
## Query and visualize data
+
Once you've set up {{< cloud-name "short" >}} to collect data, you can do the following:
-- Query data using Flux, the UI, and the `influx` command line interface. See [Query data](/v2.0/query-data/).
-- Build custom dashboards to visualize your data. See [Visualize data](/v2.0/visualize-data/).
+- Query data using Flux, the UI, and the `influx` command line interface.
+  See [Query data](/v2.0/query-data/) and the query sketch below.
+- Build custom dashboards to visualize your data.
+ See [Visualize data](/v2.0/visualize-data/).
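+
+A minimal query from the `influx` CLI might look like the following sketch
+(placeholder organization and bucket names; add `--host` and `--token` flags for your
+{{< cloud-name "short" >}} instance):
+
+```sh
+# Return the last hour of data from a bucket using a Flux query
+influx query --org example-org 'from(bucket: "example-bucket") |> range(start: -1h)'
+```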
+
+## Process data
+
+Use InfluxDB tasks to process and downsample data. See [Process data](/v2.0/process-data/).
## View data usage
-Once you've set up {{< cloud-name "short" >}} to collect data, view your data usage, including:
-- **Writes:** Total kilobytes ingested.
-- **Reads:** Total kilobytes sent out for responses to queries.
-- **Total Query Duration:** Sum of time spent processing queries in seconds.
-- **Storage:** Average disk usage in gigabytes.
+Once you're up and running with {{< cloud-name "short" >}}, [monitor your data usage in
+your {{< cloud-name "short" >}} UI](/v2.0/cloud/account-management/data-usage/).
-You'll see sparkline data over the past 4 hours and a single value that shows usage in the last 5 minutes.
-To view your data, click **Usage** in the left navigation menu.
+## Differences between InfluxDB Cloud and InfluxDB OSS
+{{< cloud-name >}} is API-compatible and functionally compatible with InfluxDB OSS 2.0.
+The primary differences between InfluxDB OSS 2.0 and InfluxDB Cloud 2.0 are:
-{{< img-hd src="/img/2-0-cloud-usage.png" />}}
+- [InfluxDB scrapers](/v2.0/write-data/scrape-data/) that collect data from specified
+ targets are not available in {{< cloud-name "short" >}}.
+- {{< cloud-name "short" >}} instances are currently limited to a single organization with a single user.
+- Retrieving data from a file-based CSV source using the `file` parameter of the
+  [`csv.from()`](/v2.0/reference/flux/functions/csv/from) function is not supported;
+  however, you can use raw CSV data with the `csv` parameter.
+- Multi-organization accounts and multi-user organizations are currently not
+ available in {{< cloud-name >}}.
-## Review rate limits
-To optimize InfluxDB Cloud 2.0 services, [rate limits](/v2.0/cloud/rate-limits/) are in place for Free tier users.
-During beta, you can check out our Paid tier for free.
+### New features in InfluxDB Cloud 2.0
-To upgrade to Paid tier for free, discuss use cases, or increase rate limits,
-reach out to cloudbeta@influxdata.com.
-
-{{% note %}}
-#### Known issues and disabled features
-_See [Known issues](/v2.0/cloud/about/known-issues/) for information regarding all known issues in InfluxDB Cloud._
-{{% /note %}}
+- **Free Plan (rate-limited)**: Skip downloading and installing InfluxDB 2.0 and
+ jump right in to exploring InfluxDB 2.0 technology.
+ The Free Plan is designed for getting started with InfluxDB and for small hobby projects.
+- **Flux support**: [Flux](/v2.0/query-data/get-started/) is a standalone data
+ scripting and query language that increases productivity and code reuse.
+ It is the primary language for working with data within InfluxDB 2.0.
+ Flux can be used with other data sources as well.
+ This allows users to work with data where it resides.
+- **Unified API**: Everything in InfluxDB (ingest, query, storage, and visualization)
+ is now accessible using a unified [InfluxDB v2 API](/v2.0/reference/api/) that
+ enables seamless movement between open source and cloud.
+- **Integrated visualization and dashboards**: Based on the pioneering Chronograf project,
+ the new user interface (InfluxDB UI) offers quick and effortless onboarding,
+ richer user experiences, and significantly quicker results.
+- **Usage-based pricing**: The [Pay As You Go Plan](/v2.0/cloud/pricing-plans/#pay-as-you-go-plan)
+ offers more flexibility and ensures that you only pay for what you use. To estimate your projected usage costs, use the [InfluxDB Cloud 2.0 pricing calculator](/v2.0/cloud/pricing-calculator/).
diff --git a/content/v2.0/cloud/pricing-calculator.md b/content/v2.0/cloud/pricing-calculator.md
new file mode 100644
index 000000000..bca77ea4b
--- /dev/null
+++ b/content/v2.0/cloud/pricing-calculator.md
@@ -0,0 +1,49 @@
+---
+title: InfluxDB Cloud 2.0 pricing calculator
+description: >
+ Use the InfluxDB Cloud 2.0 pricing calculator to estimate costs by adjusting the number of devices,
+ plugins, metrics, and writes for the Pay As You Go Plan.
+weight: 2
+menu:
+ v2_0_cloud:
+ name: Pricing calculator
+---
+
+Use the {{< cloud-name >}} pricing calculator to estimate costs for the Pay As You Go plan by adjusting your number of devices,
+ plugins, users, dashboards, writes, and retention. Default configurations include:
+
+| Configuration | Hobby | Standard | Professional | Enterprise |
+|:-----------------------------------|-------:|---------:|-------------:|-----------:|
+| **Devices** | 8 | 200 | 500 | 1000 |
+| **Plugins per device** | 1 | 4 | 4 | 5 |
+| **Users** | 1 | 2 | 10 | 20 |
+| **Concurrent dashboards per user** | 2 | 2 | 2 | 2 |
+| **Writes per minute** | 6 | 4 | 3 | 3 |
+| **Average retention in days** | 7 | 30 | 30 | 30 |
+
+Guidelines used to estimate costs for default configurations:
+
+- Average metrics per plugin = 25
+- Average KB per value = 0.01
+- Number of cells per dashboard = 10
+- Average response KB per cell = 0.5
+- Average query duration = 75ms
+
+**To estimate costs**
+
+1. Do one of the following:
+
+ - Free plan. Hover over the **Usage** icon in the left navigation bar and select **Billing**.
+
+ {{< nav-icon "cloud" >}}
+
+ Then click the **Pricing Calculator** link at the bottom of the page.
+ - Pay As You Go plan. Open the pricing calculator [here](https://cloud2.influxdata.com/pricing).
+2. Choose your region.
+3. Select your configuration:
+   - **Hobby**. For a single user monitoring a few machines or sensors.
+   - **Standard**. For a single team requiring real-time visibility and monitoring a single set of use cases.
+   - **Professional**. For teams monitoring multiple disparate systems or use cases.
+   - **Enterprise**. For teams monitoring multiple domains and use cases accessing a variety of dashboards.
+4. Adjust the default configuration values to match your number of devices, plugins, metrics, and so on. The **Projected Usage** costs are automatically updated as you adjust your configuration.
+5. Click **Get started with InfluxDB Cloud** [to get started](https://v2.docs.influxdata.com/v2.0/cloud/get-started/).
diff --git a/content/v2.0/cloud/pricing-plans.md b/content/v2.0/cloud/pricing-plans.md
new file mode 100644
index 000000000..3582ffd0b
--- /dev/null
+++ b/content/v2.0/cloud/pricing-plans.md
@@ -0,0 +1,65 @@
+---
+title: InfluxDB Cloud 2.0 pricing plans
+description: >
+ InfluxDB Cloud 2.0 provides two pricing plans to fit your needs – the rate-limited
+ Free Plan and the Pay As You Go Plan.
+aliases:
+ - /v2.0/cloud/rate-limits/
+weight: 2
+menu:
+ v2_0_cloud:
+ name: Pricing plans
+---
+
+InfluxDB Cloud 2.0 offers two pricing plans:
+
+- [Free Plan](#free-plan)
+- [Pay As You Go Plan](#pay-as-you-go-plan)
+
+To estimate your projected usage costs, use the [InfluxDB Cloud 2.0 pricing calculator](/v2.0/cloud/pricing-calculator/).
+
+## Free Plan
+
+All new {{< cloud-name >}} accounts start with a rate-limited Free Plan.
+Use this plan as much and as long as you want within the Free Plan rate limits:
+
+#### Free Plan rate limits
+
+- **Writes:** 3MB every 5 minutes
+- **Query:** 30MB every 5 minutes
+- **Storage:** 72-hour data retention
+- **Series cardinality:** 10,000
+- **Create:**
+ - Up to 5 dashboards
+ - Up to 5 tasks
+ - Up to 2 buckets
+ - Up to 2 checks
+ - Up to 2 notification rules
+ - Unlimited Slack notification endpoints
+
+
+_To remove rate limits, [upgrade to a Pay As You Go Plan](/v2.0/cloud/account-management/upgrade-to-payg/)._
+
+## Pay As You Go Plan
+
+The Pay As You Go Plan offers more flexibility and ensures you only pay for what you [use](/v2.0/cloud/account-management/data-usage/).
+
+#### Pay As You Go Plan rate limits
+
+To protect against any intentional or unintentional harm, Pay As You Go Plans include soft rate limits:
+
+- **Writes:** 300MB every 5 minutes
+- **Ingest batch size:** 50MB
+- **Queries:** 3000MB every 5 minutes
+- **Storage:** Unlimited retention
+- **Series cardinality:** 1,000,000
+- **Create:**
+ - Unlimited dashboards
+ - Unlimited tasks
+ - Unlimited buckets
+ - Unlimited users
+ - Unlimited checks
+ - Unlimited notification rules
+ - Unlimited PagerDuty, Slack, and HTTP notification endpoints
+
+_To request higher rate limits, contact [InfluxData Support](mailto:support@influxdata.com)._
diff --git a/content/v2.0/cloud/rate-limits.md b/content/v2.0/cloud/rate-limits.md
deleted file mode 100644
index d6646553a..000000000
--- a/content/v2.0/cloud/rate-limits.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: InfluxDB Cloud 2.0 rate limits
-description: Rate limits for Free tier users optimize InfluxDB Cloud 2.0 services.
-weight: 2
-menu:
- v2_0_cloud:
- name: Rate limits
----
-
-To optimize InfluxDB Cloud 2.0 services, the following rate limits are in place for Free tier users.
-To increase your rate limits, contact cloudbeta@influxdata.com.
-
-- `write` endpoint:
- - 5 concurrent API calls
- - 3000 KB (10 KB/s) of data written in a 5 minute window
-
-- `query` endpoint:
- - 20 concurrent API calls
- - 3000 MB (10 MB/s) of data returned in a 5 minute window
-
-- 5 dashboards
-- 5 tasks
-- 2 buckets
-- 72 hour retention period
-
-## View data usage
-To view data usage, click **Usage** in the left navigation bar.
-
-{{< nav-icon "usage" >}}
-
-## HTTP response codes
-
-When a request exceeds the rate limit for the endpoint, the InfluxDB API returns:
-
-- HTTP 429 “Too Many Requests”
-- Retry-After: xxx (seconds to wait before retrying the request)
diff --git a/content/v2.0/example.md b/content/v2.0/example.md
index 0c7560752..9793b5679 100644
--- a/content/v2.0/example.md
+++ b/content/v2.0/example.md
@@ -10,7 +10,11 @@ enterprise_all: true
#cloud_all: true
cloud_some: true
draft: true
-"v2.0/tags": [influxdb]
+"v2.0/tags": [influxdb, functions]
+related:
+ - /v2.0/write-data/
+ - /v2.0/write-data/quick-start
+ - https://influxdata.com, This is an external link
---
This is a paragraph. Lorem ipsum dolor ({{< icon "trash" >}}) sit amet, consectetur adipiscing elit. Nunc rutrum, metus id scelerisque euismod, erat ante suscipit nibh, ac congue enim risus id est. Etiam tristique nisi et tristique auctor. Morbi eu bibendum erat. Sed ullamcorper, dui id lobortis efficitur, mauris odio pharetra neque, vel tempor odio dolor blandit justo.
diff --git a/content/v2.0/get-started.md b/content/v2.0/get-started.md
index f886dc036..539d98f6c 100644
--- a/content/v2.0/get-started.md
+++ b/content/v2.0/get-started.md
@@ -27,7 +27,7 @@ This article describes how to get started with InfluxDB OSS. To get started with
### Download and install InfluxDB v2.0 alpha
Download InfluxDB v2.0 alpha for macOS.
-InfluxDB v2.0 alpha (macOS)
+InfluxDB v2.0 alpha (macOS)
### Unpackage the InfluxDB binaries
Unpackage the downloaded archive.
@@ -36,7 +36,7 @@ _**Note:** The following commands are examples. Adjust the file paths to your ow
```sh
# Unpackage contents to the current working directory
-gunzip -c ~/Downloads/influxdb_2.0.0-alpha.8_darwin_amd64.tar.gz | tar xopf -
+gunzip -c ~/Downloads/influxdb_2.0.0-alpha.18_darwin_amd64.tar.gz | tar xopf -
```
If you choose, you can place `influx` and `influxd` in your `$PATH`.
You can also prefix the executables with `./` to run them in place.
```sh
# (Optional) Copy the influx and influxd binary to your $PATH
-sudo cp influxdb_2.0.0-alpha.8_darwin_amd64/{influx,influxd} /usr/local/bin/
+sudo cp influxdb_2.0.0-alpha.18_darwin_amd64/{influx,influxd} /usr/local/bin/
```
{{% note %}}
@@ -90,8 +90,8 @@ influxd --reporting-disabled
### Download and install InfluxDB v2.0 alpha
Download the InfluxDB v2.0 alpha package appropriate for your chipset.
-InfluxDB v2.0 alpha (amd64)
-InfluxDB v2.0 alpha (arm)
+InfluxDB v2.0 alpha (amd64)
+InfluxDB v2.0 alpha (arm)
### Place the executables in your $PATH
Unpackage the downloaded archive and place the `influx` and `influxd` executables in your system `$PATH`.
@@ -100,10 +100,10 @@ _**Note:** The following commands are examples. Adjust the file names, paths, an
```sh
# Unpackage contents to the current working directory
-tar xvzf path/to/influxdb_2.0.0-alpha.10_linux_amd64.tar.gz
+tar xvzf path/to/influxdb_2.0.0-alpha.18_linux_amd64.tar.gz
# Copy the influx and influxd binary to your $PATH
-sudo cp influxdb_2.0.0-alpha.10_linux_amd64/{influx,influxd} /usr/local/bin/
+sudo cp influxdb_2.0.0-alpha.18_linux_amd64/{influx,influxd} /usr/local/bin/
```
{{% note %}}
diff --git a/content/v2.0/monitor-alert/_index.md b/content/v2.0/monitor-alert/_index.md
new file mode 100644
index 000000000..cd52ba5b2
--- /dev/null
+++ b/content/v2.0/monitor-alert/_index.md
@@ -0,0 +1,38 @@
+---
+title: Monitor data and send alerts
+seotitle: Monitor data and send alerts
+description: >
+ Monitor your time series data and send alerts by creating checks, notification
+ rules, and notification endpoints.
+menu:
+ v2_0:
+ name: Monitor & alert
+weight: 6
+v2.0/tags: [monitor, alert, checks, notification, endpoints]
+---
+
+Monitor your time series data and send alerts by creating checks, notification
+rules, and notification endpoints.
+
+## The monitoring workflow
+
+1. A [check](/v2.0/reference/glossary/#check) in InfluxDB queries data and assigns a status with a `_level` based on specific conditions.
+2. InfluxDB stores the output of a check in the `statuses` measurement in the `_monitoring` system bucket.
+3. [Notification rules](/v2.0/reference/glossary/#notification-rule) check data in the `statuses`
+ measurement and, based on conditions set in the notification rule, send a message
+ to a [notification endpoint](/v2.0/reference/glossary/#notification-endpoint).
+4. InfluxDB stores notifications in the `notifications` measurement in the `_monitoring` system bucket.
+
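+For example, the following sketch (using the measurement and bucket names described above
+and an arbitrary one-hour time range) queries critical statuses directly from the `_monitoring` bucket:
+
+```js
+// Query statuses written by checks during the last hour
+from(bucket: "_monitoring")
+  |> range(start: -1h)
+  |> filter(fn: (r) => r._measurement == "statuses")
+  |> filter(fn: (r) => r._level == "crit")
+```
+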
+## Monitor your data
+To get started, do the following:
+
+1. [Create checks](/v2.0/monitor-alert/checks/create/) to monitor data and assign a status.
+2. [Add notification endpoints](/v2.0/monitor-alert/notification-endpoints/create/)
+ to send notifications to third parties.
+3. [Create notification rules](/v2.0/monitor-alert/notification-rules/create) to check
+   statuses and send notifications to your notification endpoints.
+
+
+## Manage your monitoring and alerting pipeline
+
+{{< children >}}
diff --git a/content/v2.0/monitor-alert/checks/_index.md b/content/v2.0/monitor-alert/checks/_index.md
new file mode 100644
index 000000000..2c84bb054
--- /dev/null
+++ b/content/v2.0/monitor-alert/checks/_index.md
@@ -0,0 +1,19 @@
+---
+title: Manage checks
+seotitle: Manage monitoring checks in InfluxDB
+description: >
+ Checks in InfluxDB query data and apply a status or level to each data point based on specified conditions.
+menu:
+ v2_0:
+ parent: Monitor & alert
+weight: 101
+v2.0/tags: [monitor, checks, notifications, alert]
+related:
+ - /v2.0/monitor-alert/notification-rules/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+Checks in InfluxDB query data and apply a status or level to each data point based on specified conditions.
+Learn how to create and manage checks:
+
+{{< children >}}
diff --git a/content/v2.0/monitor-alert/checks/create.md b/content/v2.0/monitor-alert/checks/create.md
new file mode 100644
index 000000000..c2b8964f1
--- /dev/null
+++ b/content/v2.0/monitor-alert/checks/create.md
@@ -0,0 +1,155 @@
+---
+title: Create checks
+seotitle: Create monitoring checks in InfluxDB
+description: >
+ Create a check in the InfluxDB UI.
+menu:
+ v2_0:
+ parent: Manage checks
+weight: 201
+related:
+ - /v2.0/monitor-alert/notification-rules/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+Create a check in the InfluxDB user interface (UI).
+Checks query data and apply a status to each point based on specified conditions.
+
+## Check types
+There are two types of checks – a threshold check and a deadman check.
+
+#### Threshold check
+A threshold check assigns a status based on a value being above, below,
+inside, or outside of defined thresholds.
+[Create a threshold check](#create-a-threshold-check).
+
+#### Deadman check
+A deadman check assigns a status to data when a series or group doesn't report
+in a specified amount of time.
+[Create a deadman check](#create-a-deadman-check).
+
+## Parts of a check
+A check consists of two parts – a query and a check configuration.
+
+#### Check query
+- Specifies the dataset to monitor.
+- May include tags to narrow results.
+
+#### Check configuration
+- Defines check properties, including the check interval and status message.
+- Evaluates specified conditions and applies a status (if applicable) to each data point:
+ - `crit`
+ - `warn`
+ - `info`
+ - `ok`
+- Stores status in the `_level` column.
+
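+For example, a check query is an ordinary Flux query; the sketch below uses a hypothetical
+`example-bucket`, `mem` measurement, and `host` tag, and aggregates values into one-minute means:
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -5m)
+  |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+  |> filter(fn: (r) => r.host == "host1")
+  |> aggregateWindow(every: 1m, fn: mean)
+```
+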
+## Create a check in the InfluxDB UI
+1. Click **Monitoring & Alerting** in the sidebar in the InfluxDB UI.
+
+ {{< nav-icon "alerts" >}}
+
+2. In the top right corner of the **Checks** column, click **{{< icon "plus" >}} Create**
+ and select the [type of check](#check-types) to create.
+3. Click **Name this check** in the top left corner and provide a unique name for the check.
+
+#### Configure the check query
+1. Select the **bucket**, **measurement**, **field** and **tag sets** to query.
+2. If creating a threshold check, select an **aggregate function**.
+ Aggregate functions aggregate data between the specified check intervals and
+ return a single value for the check to process.
+
+ In the **Aggregate functions** column, select an interval from the interval drop-down list
+ (for example, "Every 5 minutes") and an aggregate function from the list of functions.
+
+3. Click **Submit** to run the query and preview the results.
+ To see the raw query results, click the **{{< icon "toggle" >}} View Raw Data** toggle.
+
+#### Configure the check
+1. Click **2. Check** near the top of the window.
+2. In the **Properties** column, configure the following:
+
+ ##### Schedule Every
+ Select the interval to run the check (for example, "Every 5 minutes").
+ This interval matches the aggregate function interval for the check query.
+ _Changing the interval here will update the aggregate function interval._
+
+ ##### Offset
+ Delay the execution of a task to account for any late data.
+ Offset queries do not change the queried time range.
+
+ {{% note %}}Your offset must be shorter than your [check interval](#schedule-every).
+ {{% /note %}}
+
+ ##### Tags
+ Add custom tags to the query output.
+ Each custom tag appends a new column to each row in the query output.
+ The column label is the tag key and the column value is the tag value.
+
+ Use custom tags to associate additional metadata with the check.
+   Common metadata tags across different checks let you easily group and organize checks.
+ You can also use custom tags in [notification rules](/v2.0/monitor-alert/notification-rules/create/).
+
+3. In the **Status Message Template** column, enter the status message template for the check.
+ Use [Flux string interpolation](/v2.0/reference/flux/language/string-interpolation/)
+ to populate the message with data from the query.
+
+ {{% note %}}
+#### Flux only interpolates string values
+Flux currently interpolates only string values.
+Use the [string() function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string/)
+to convert non-string values to strings.
+
+```js
+count = 12
+"I currently have ${string(v: count)} cats."
+```
+ {{% /note %}}
+
+ Check data is represented as an object, `r`.
+ Access specific column values using dot notation: `r.columnName`.
+
+ Use data from the following columns:
+
+ - columns included in the query output
+ - [custom tags](#tags) added to the query output
+ - `_check_id`
+ - `_check_name`
+ - `_level`
+ - `_source_measurement`
+ - `_type`
+
+ ###### Example status message template
+ ```
+ From ${r._check_name}:
+ ${r._field} is ${r._level}.
+ Its value is ${string(v: r._value)}.
+ ```
+
+ When a check generates a status, it stores the message in the `_message` column.
+
+4. Define check conditions that assign statuses to points.
+ Condition options depend on your check type.
+
+ ##### Configure a threshold check
+ 1. In the **Thresholds** column, click the status name (CRIT, WARN, INFO, or OK)
+ to define conditions for that specific status.
+ 2. From the **When value** drop-down list, select a threshold: is above, is below,
+ is inside of, is outside of.
+ 3. Enter a value or values for the threshold.
+ You can also use the threshold sliders in the data visualization to define threshold values.
+
+ ##### Configure a deadman check
+ 1. In the **Deadman** column, enter a duration for the deadman check in the **for** field.
+ For example, `90s`, `5m`, `2h30m`, etc.
+ 2. Use the **set status to** drop-down list to select a status to set on a dead series.
+ 3. In the **And stop checking after** field, enter the time to stop monitoring the series.
+ For example, `30m`, `2h`, `3h15m`, etc.
+
+5. Click the green **{{< icon "check" >}}** in the top right corner to save the check.
+
+## Clone a check
+Create a new check by cloning an existing check.
+
+1. In the **Checks** column, hover over the check you want to clone.
+2. Click the **{{< icon "clone" >}}** icon, then **Clone**.
diff --git a/content/v2.0/monitor-alert/checks/delete.md b/content/v2.0/monitor-alert/checks/delete.md
new file mode 100644
index 000000000..8e884679f
--- /dev/null
+++ b/content/v2.0/monitor-alert/checks/delete.md
@@ -0,0 +1,34 @@
+---
+title: Delete checks
+seotitle: Delete monitoring checks in InfluxDB
+description: >
+ Delete checks in the InfluxDB UI.
+menu:
+ v2_0:
+ parent: Manage checks
+weight: 204
+related:
+ - /v2.0/monitor-alert/notification-rules/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+If you no longer need a check, use the InfluxDB user interface (UI) to delete it.
+
+{{% warn %}}
+Deleting a check cannot be undone.
+{{% /warn %}}
+
+1. Click **Monitoring & Alerting** in the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. In the **Checks** column, hover over the check you want to delete, click the
+ **{{< icon "delete" >}}** icon, then **Delete**.
+
+After a check is deleted, all statuses generated by the check remain in the `_monitoring`
+bucket until the retention period for the bucket expires.
+
+{{% note %}}
+You can also [disable a check](/v2.0/monitor-alert/checks/update/#enable-or-disable-a-check)
+without having to delete it.
+{{% /note %}}
diff --git a/content/v2.0/monitor-alert/checks/update.md b/content/v2.0/monitor-alert/checks/update.md
new file mode 100644
index 000000000..d2c9af61c
--- /dev/null
+++ b/content/v2.0/monitor-alert/checks/update.md
@@ -0,0 +1,62 @@
+---
+title: Update checks
+seotitle: Update monitoring checks in InfluxDB
+description: >
+ Update, rename, enable or disable checks in the InfluxDB UI.
+menu:
+ v2_0:
+ parent: Manage checks
+weight: 203
+related:
+ - /v2.0/monitor-alert/notification-rules/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+Update checks in the InfluxDB user interface (UI).
+Common updates include:
+
+- [Update check queries and logic](#update-check-queries-and-logic)
+- [Enable or disable a check](#enable-or-disable-a-check)
+- [Rename a check](#rename-a-check)
+- [Add or update a check description](#add-or-update-a-check-description)
+- [Add a label to a check](#add-a-label-to-a-check)
+
+To update checks, click **Monitoring & Alerting** in the InfluxDB UI sidebar.
+
+{{< nav-icon "alerts" >}}
+
+
+## Update check queries and logic
+1. In the **Checks** column, click the name of the check you want to update.
+ The check builder appears.
+2. To edit the check query, click **1. Query** at the top of the check builder window.
+3. To edit the check logic, click **2. Check** at the top of the check builder window.
+
+_For details about using the check builder, see [Create checks](/v2.0/monitor-alert/checks/create/)._
+
+## Enable or disable a check
+In the **Checks** column, click the {{< icon "toggle" >}} toggle next to a check
+to enable or disable it.
+
+## Rename a check
+1. In the **Checks** column, hover over the name of the check you want to update.
+2. Click the **{{< icon "edit" >}}** icon that appears next to the check name.
+3. Enter a new name and click out of the name field or press enter to save.
+
+_You can also rename a check in the [check builder](#update-check-queries-and-logic)._
+
+## Add or update a check description
+1. In the **Checks** column, hover over the check description you want to update.
+2. Click the **{{< icon "edit" >}}** icon that appears next to the description.
+3. Enter a new description and click out of the description field or press enter to save.
+
+## Add a label to a check
+1. In the **Checks** column, click **Add a label** next to the check you want to add a label to.
+ The **Add Labels** box opens.
+2. To add an existing label, select the label from the list.
+3. To create and add a new label:
+ - In the search field, enter the name of the new label. The **Create Label** box opens.
+ - In the **Description** field, enter an optional description for the label.
+ - Select a color for the label.
+ - Click **Create Label**.
+4. To remove a label, hover over the label under the check and click **{{< icon "x" >}}**.
diff --git a/content/v2.0/monitor-alert/checks/view.md b/content/v2.0/monitor-alert/checks/view.md
new file mode 100644
index 000000000..a41f90713
--- /dev/null
+++ b/content/v2.0/monitor-alert/checks/view.md
@@ -0,0 +1,43 @@
+---
+title: View checks
+seotitle: View monitoring checks in InfluxDB
+description: >
+ View check details and statuses and notifications generated by checks in the InfluxDB UI.
+menu:
+ v2_0:
+ parent: Manage checks
+weight: 202
+related:
+ - /v2.0/monitor-alert/notification-rules/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+View check details and statuses and notifications generated by checks in the InfluxDB user interface (UI).
+
+- [View a list of all checks](#view-a-list-of-all-checks)
+- [View check details](#view-check-details)
+- [View statuses generated by a check](#view-statuses-generated-by-a-check)
+- [View notifications triggered by a check](#view-notifications-triggered-by-a-check)
+
+To view checks, click **Monitoring & Alerting** in the InfluxDB UI sidebar.
+
+{{< nav-icon "alerts" >}}
+
+## View a list of all checks
+The **Checks** column on the Monitoring & Alerting landing page displays all existing checks.
+
+## View check details
+In the **Checks** column, click the name of the check you want to view.
+The check builder appears.
+Here you can view the check query and logic.
+
+## View statuses generated by a check
+1. In the **Checks** column, hover over the check, click the **{{< icon "view" >}}**
+ icon, then **View History**.
+ The Statuses History page displays statuses generated by the selected check.
+
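+{{% note %}}
+Statuses are stored in the `statuses` measurement of the `_monitoring` bucket, so you can
+also query them directly. A minimal sketch (assuming a check named "CPU check"):
+
+```js
+from(bucket: "_monitoring")
+  |> range(start: -24h)
+  |> filter(fn: (r) => r._measurement == "statuses")
+  |> filter(fn: (r) => r._check_name == "CPU check")
+```
+{{% /note %}}
+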
+## View notifications triggered by a check
+1. In the **Checks** column, hover over the check, click the **{{< icon "view" >}}**
+ icon, then **View History**.
+2. In the top left corner, click **Notifications**.
+ The Notifications History page displays notifications initiated by the selected check.
diff --git a/content/v2.0/monitor-alert/notification-endpoints/_index.md b/content/v2.0/monitor-alert/notification-endpoints/_index.md
new file mode 100644
index 000000000..f3d6fef42
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-endpoints/_index.md
@@ -0,0 +1,20 @@
+---
+title: Manage notification endpoints
+list_title: Manage notification endpoints
+description: >
+ Create, read, update, and delete endpoints in the InfluxDB UI.
+v2.0/tags: [monitor, endpoints, notifications, alert]
+menu:
+ v2_0:
+ parent: Monitor & alert
+weight: 102
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-rules/
+---
+
+Notification endpoints store information to connect to a third party service.
+If you're using the Free plan, create a Slack endpoint.
+If you're using the Pay as You Go plan, create a connection to an HTTP, Slack, or PagerDuty endpoint.
+
+{{< children >}}
diff --git a/content/v2.0/monitor-alert/notification-endpoints/create.md b/content/v2.0/monitor-alert/notification-endpoints/create.md
new file mode 100644
index 000000000..829158886
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-endpoints/create.md
@@ -0,0 +1,45 @@
+---
+title: Create notification endpoints
+description: >
+ Create notification endpoints to send alerts on your time series data.
+menu:
+ v2_0:
+ name: Create endpoints
+ parent: Manage notification endpoints
+weight: 201
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-rules/
+---
+
+To send notifications about changes in your data, start by creating a notification endpoint to a third party service. After creating notification endpoints, [create notification rules](/v2.0/monitor-alert/notification-rules/create) to send alerts to third party services on [check statuses](/v2.0/monitor-alert/checks/create).
+
+## Create a notification endpoint in the UI
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Next to **Notification Endpoints**, click **Create**.
+3. From the **Destination** drop-down list, select a destination endpoint to send notifications.
+ The following endpoints are available for InfluxDB 2.0 OSS, the InfluxDB Cloud 2.0 Free Plan,
+ and the InfluxDB Cloud 2.0 Pay As You Go (PAYG) Plan:
+
+ | Endpoint | OSS | Free Plan _(Cloud)_ | PAYG Plan _(Cloud)_ |
+ |:-------- |:--------: |:-------------------: |:----------------------------:|
+ | **Slack** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** |
+ | **PagerDuty** | **{{< icon "check" >}}** | | **{{< icon "check" >}}** |
+ | **HTTP** | **{{< icon "check" >}}** | | **{{< icon "check" >}}** |
+
+4. In the **Name** and **Description** fields, enter a name and description for the endpoint.
+5. Enter the information to connect to the endpoint:
+
+   - For HTTP, enter the **URL** to send the notification, and then select the **auth method** to use:
+     - **None** for no authentication.
+     - **Basic** to authenticate with a username and password. Enter credentials in the **Username** and **Password** fields.
+     - **Bearer** to authenticate with a token. Enter the authentication token in the **Token** field.
+
+   - For Slack, create an [Incoming WebHook](https://api.slack.com/incoming-webhooks#posting_with_webhooks) in Slack, and then enter your webhook URL in the **Slack Incoming WebHook URL** field.
+
+ - For PagerDuty:
+ - [Create a new service](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-new-service), [add an integration for your service](https://support.pagerduty.com/docs/services-and-integrations#section-add-integrations-to-an-existing-service), and then enter the PagerDuty integration key for your new service in the **Routing Key** field.
+     - The **Client URL** provides a useful link in your PagerDuty notification. Enter any URL that you'd like to use to investigate issues. This URL is sent as the `client_url` property in the PagerDuty trigger event. By default, the **Client URL** is set to your Monitoring & Alerting History page, and the following is included in the PagerDuty trigger event: `"client_url": "https://twodotoh.a.influxcloud.net/orgs//alert-history"`
+
+6. Click **Create Notification Endpoint**.
diff --git a/content/v2.0/monitor-alert/notification-endpoints/delete.md b/content/v2.0/monitor-alert/notification-endpoints/delete.md
new file mode 100644
index 000000000..24a0dc71b
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-endpoints/delete.md
@@ -0,0 +1,24 @@
+---
+title: Delete notification endpoints
+description: >
+ Delete a notification endpoint in the InfluxDB UI.
+menu:
+ v2_0:
+ name: Delete endpoints
+ parent: Manage notification endpoints
+weight: 204
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-rules/
+---
+
+If you no longer need to send notifications to an endpoint, complete the steps below to delete the endpoint, and then [update notification rules](/v2.0/monitor-alert/notification-rules/update) to use a different notification endpoint as needed.
+
+## Delete a notification endpoint in the UI
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Endpoints**, find the endpoint you want to delete.
+3. Click the delete icon, then click **Delete** to confirm.
diff --git a/content/v2.0/monitor-alert/notification-endpoints/update.md b/content/v2.0/monitor-alert/notification-endpoints/update.md
new file mode 100644
index 000000000..d2624e5e4
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-endpoints/update.md
@@ -0,0 +1,65 @@
+---
+title: Update notification endpoints
+description: >
+ Update notification endpoints in the InfluxDB UI.
+menu:
+ v2_0:
+ name: Update endpoints
+ parent: Manage notification endpoints
+weight: 203
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-rules/
+---
+
+To update the notification endpoint details, complete the procedures below as needed. To update the notification endpoint selected for a notification rule, see [update notification rules](/v2.0/monitor-alert/notification-rules/update/).
+
+## Add a label to a notification endpoint
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Endpoints**, click **Add a label** next to the endpoint you want to add a label to. The **Add Labels** box opens.
+3. To add an existing label, select the label from the list.
+4. To create and add a new label:
+
+ - In the search field, enter the name of the new label. The **Create Label** box opens.
+ - In the **Description** field, enter an optional description for the label.
+ - Select a color for the label.
+ - Click **Create Label**.
+
+5. To remove a label, hover over the label under an endpoint and click X.
+
+
+## Disable a notification endpoint
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+
+ {{< nav-icon "alerts" >}}
+
+
+2. Under **Notification Endpoints**, find the endpoint you want to disable.
+3. Click the blue toggle to disable the notification endpoint.
+
+## Update the name or description of a notification endpoint
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+
+ {{< nav-icon "alerts" >}}
+
+
+2. Under **Notification Endpoints**, hover over the name or description of the endpoint.
+3. Click the pencil icon to edit the field.
+4. Click outside of the field to save your changes.
+
+## Change endpoint details
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Endpoints**, click the endpoint to update.
+3. Update details as needed, and then click **Edit a Notification Endpoint**. For details about each field, see [Create notification endpoints](/v2.0/monitor-alert/notification-endpoints/create/).
diff --git a/content/v2.0/monitor-alert/notification-endpoints/view.md b/content/v2.0/monitor-alert/notification-endpoints/view.md
new file mode 100644
index 000000000..982fa4c42
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-endpoints/view.md
@@ -0,0 +1,43 @@
+---
+title: View notification endpoint history
+seotitle: View notification endpoint details and history
+description: >
+ View notification endpoint details and history in the InfluxDB UI.
+menu:
+ v2_0:
+ name: View endpoint history
+ parent: Manage notification endpoints
+weight: 202
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-rules/
+---
+
+View notification endpoint details and history in the InfluxDB user interface (UI).
+
+- [View notification endpoints](#view-notification-endpoints)
+- [View notification endpoint details](#view-notification-endpoint-details)
+- [View notification endpoint history](#view-notification-endpoint-history), including statuses and notifications sent to the endpoint
+
+## View notification endpoints
+
+- Click **Monitoring & Alerting** in the InfluxDB UI sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+ In the **Notification Endpoints** column, view existing notification endpoints.
+
+## View notification endpoint details
+
+1. Click **Monitoring & Alerting** in the InfluxDB UI sidebar.
+2. In the **Notification Endpoints** column, click the name of the notification endpoint you want to view.
+3. View the notification endpoint destination, name, and information to connect to the endpoint.
+
+## View notification endpoint history
+
+1. Click **Monitoring & Alerting** in the InfluxDB UI sidebar.
+2. In the **Notification Endpoints** column, hover over the notification endpoint, click the **{{< icon "view" >}}** icon, then **View History**.
+The Check Statuses History page displays:
+
+ - Statuses generated for the selected notification endpoint
+ - Notifications sent to the selected notification endpoint
diff --git a/content/v2.0/monitor-alert/notification-rules/_index.md b/content/v2.0/monitor-alert/notification-rules/_index.md
new file mode 100644
index 000000000..bc99a9114
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-rules/_index.md
@@ -0,0 +1,17 @@
+---
+title: Manage notification rules
+description: >
+ Manage notification rules in InfluxDB.
+weight: 103
+v2.0/tags: [monitor, notifications, alert]
+menu:
+ v2_0:
+ parent: Monitor & alert
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+The following articles provide information on managing your notification rules:
+
+{{< children >}}
diff --git a/content/v2.0/monitor-alert/notification-rules/create.md b/content/v2.0/monitor-alert/notification-rules/create.md
new file mode 100644
index 000000000..471f65aff
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-rules/create.md
@@ -0,0 +1,42 @@
+---
+title: Create notification rules
+description: >
+ Create notification rules to send alerts on your time series data.
+weight: 201
+menu:
+ v2_0:
+ parent: Manage notification rules
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+Once you've set up checks and notification endpoints, create notification rules to alert you.
+_For details, see [Manage checks](/v2.0/monitor-alert/checks/) and
+[Manage notification endpoints](/v2.0/monitor-alert/notification-endpoints/)._
+
+## Create a new notification rule in the UI
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Rules**, click **+Create**.
+3. Complete the **About** section:
+ 1. In the **Name** field, enter a name for the notification rule.
+ 2. In the **Schedule Every** field, enter how frequently the rule should run.
+    3. In the **Offset** field, enter an offset time. For example, if a task runs on the hour, a 10m offset delays the task to 10 minutes after the hour. Time ranges defined in the task are relative to the specified execution time.
+4. In the **Conditions** section, build a condition using a combination of status and tag keys.
+ - Next to **When status is equal to**, select a status from the drop-down field.
+ - Next to **AND When**, enter one or more tag key-value pairs to filter by.
+5. In the **Message** section, select an endpoint to notify.
+6. Click **Create Notification Rule**.
+
+## Clone an existing notification rule in the UI
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Rules**, hover over the rule you want to clone.
+3. Click the clone icon and select **Clone**. The cloned rule appears.
diff --git a/content/v2.0/monitor-alert/notification-rules/delete.md b/content/v2.0/monitor-alert/notification-rules/delete.md
new file mode 100644
index 000000000..dea28950d
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-rules/delete.md
@@ -0,0 +1,21 @@
+---
+title: Delete notification rules
+description: >
+ If you no longer need to receive an alert, delete the associated notification rule.
+weight: 204
+menu:
+ v2_0:
+ parent: Manage notification rules
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+## Delete a notification rule in the UI
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Rules**, find the rule you want to delete.
+3. Click the delete icon, then click **Delete** to confirm.
diff --git a/content/v2.0/monitor-alert/notification-rules/update.md b/content/v2.0/monitor-alert/notification-rules/update.md
new file mode 100644
index 000000000..f88a832d0
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-rules/update.md
@@ -0,0 +1,51 @@
+---
+title: Update notification rules
+description: >
+ Update notification rules to update the notification message or change the schedule or conditions.
+weight: 203
+menu:
+ v2_0:
+ parent: Manage notification rules
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+## Add a label to notification rules
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+ {{< nav-icon "alerts" >}}
+
+2. Under **Notification Rules**, click **Add a label** next to the rule you want to add a label to. The **Add Labels** box opens.
+3. To add an existing label, select the label from the list.
+4. To create and add a new label:
+ - In the search field, enter the name of the new label. The **Create Label** box opens.
+ - In the **Description** field, enter an optional description for the label.
+ - Select a color for the label.
+ - Click **Create Label**.
+5. To remove a label, hover over the label under a rule and click X.
+
+
+## Disable notification rules
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+
+ {{< nav-icon "alerts" >}}
+
+
+2. Under **Notification Rules**, find the rule you want to disable.
+3. Click the blue toggle to disable the notification rule.
+
+## Update the name or description for notification rules
+
+1. Select the **Monitoring and Alerting** icon from the sidebar.
+
+
+ {{< nav-icon "alerts" >}}
+
+
+2. Under **Notification Rules**, hover over the name or description of a rule.
+3. Click the pencil icon to edit the field.
+4. Click outside of the field to save your changes.
diff --git a/content/v2.0/monitor-alert/notification-rules/view.md b/content/v2.0/monitor-alert/notification-rules/view.md
new file mode 100644
index 000000000..bbbcbe130
--- /dev/null
+++ b/content/v2.0/monitor-alert/notification-rules/view.md
@@ -0,0 +1,42 @@
+---
+title: View notification rules
+description: >
+  View notification rule details and statuses and notifications generated by notification rules in the InfluxDB UI.
+weight: 202
+menu:
+ v2_0:
+ parent: Manage notification rules
+related:
+ - /v2.0/monitor-alert/checks/
+ - /v2.0/monitor-alert/notification-endpoints/
+---
+
+View notification rule details and statuses and notifications generated by notification rules in the InfluxDB user interface (UI).
+
+- [View a list of all notification rules](#view-a-list-of-all-notification-rules)
+- [View notification rule details](#view-notification-rule-details)
+- [View statuses generated by a notification rule](#view-statuses-generated-by-a-notification-rule)
+- [View notifications triggered by a notification rule](#view-notifications-triggered-by-a-notification-rule)
+
+To view notification rules, click **Monitoring & Alerting** in the InfluxDB UI sidebar.
+
+{{< nav-icon "alerts" >}}
+
+## View a list of all notification rules
+The **Notification Rules** column on the Monitoring & Alerting landing page displays all existing notification rules.
+
+## View notification rule details
+In the **Notification Rules** column, click the name of the notification rule you want to view.
+The notification rule builder appears.
+Here you can view the rule's schedule, conditions, and message settings.
+
+## View statuses generated by a notification rule
+1. In the **Notification Rules** column, hover over the notification rule, click the **{{< icon "view" >}}**
+   icon, then **View History**.
+   The Statuses History page displays statuses generated by the selected notification rule.
+
+## View notifications triggered by a notification rule
+1. In the **Notification Rules** column, hover over the notification rule, click the **{{< icon "view" >}}**
+ icon, then **View History**.
+2. In the top left corner, click **Notifications**.
+ The Notifications History page displays notifications initiated by the selected notification rule.
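+
+{{% note %}}
+Notifications are stored in the `notifications` measurement of the `_monitoring` bucket, so
+you can also query them directly. A minimal sketch:
+
+```js
+from(bucket: "_monitoring")
+  |> range(start: -24h)
+  |> filter(fn: (r) => r._measurement == "notifications")
+```
+{{% /note %}}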
diff --git a/content/v2.0/organizations/buckets/create-bucket.md b/content/v2.0/organizations/buckets/create-bucket.md
index 0572d2a70..6616e838a 100644
--- a/content/v2.0/organizations/buckets/create-bucket.md
+++ b/content/v2.0/organizations/buckets/create-bucket.md
@@ -14,16 +14,16 @@ to create a bucket.
## Create a bucket in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Select the **Buckets** tab.
+2. Select **Buckets**.
3. Click **{{< icon "plus" >}} Create Bucket** in the upper right.
4. Enter a **Name** for the bucket.
-5. Select **How often to clear data?**:
- Select **Never** to retain data forever.
- Select **Periodically** to define a specific retention policy.
+5. Select when to **Delete Data**:
+ - **Never** to retain data forever.
+ - **Older than** to choose a specific retention policy.
5. Click **Create** to create the bucket.
## Create a bucket using the influx CLI
@@ -32,7 +32,7 @@ Use the [`influx bucket create` command](/v2.0/reference/cli/influx/bucket/creat
to create a new bucket. A bucket requires the following:
- A name
-- The name or ID of the organization to which it belongs
+- The name or ID of the organization the bucket belongs to
- A retention period in nanoseconds
```sh
diff --git a/content/v2.0/organizations/buckets/delete-bucket.md b/content/v2.0/organizations/buckets/delete-bucket.md
index 3622b684c..8c25b4948 100644
--- a/content/v2.0/organizations/buckets/delete-bucket.md
+++ b/content/v2.0/organizations/buckets/delete-bucket.md
@@ -14,13 +14,13 @@ to delete a bucket.
## Delete a bucket in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Select the **Buckets** tab.
+2. Select **Buckets**.
3. Hover over the bucket you would like to delete.
-4. Click **Delete** and **Confirm** to delete the bucket.
+4. Click **{{< icon "delete" >}} Delete Bucket** and **Confirm** to delete the bucket.
## Delete a bucket using the influx CLI
diff --git a/content/v2.0/organizations/buckets/update-bucket.md b/content/v2.0/organizations/buckets/update-bucket.md
index f113d9ac7..0bbfef353 100644
--- a/content/v2.0/organizations/buckets/update-bucket.md
+++ b/content/v2.0/organizations/buckets/update-bucket.md
@@ -8,6 +8,7 @@ menu:
parent: Manage buckets
weight: 202
---
+
Use the `influx` command line interface (CLI) or the InfluxDB user interface (UI) to update a bucket.
Note that updating an bucket's name will affect any assets that reference the bucket by name, including the following:
@@ -23,23 +24,22 @@ If you change a bucket name, be sure to update the bucket in the above places as
## Update a bucket's name in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Select the **Buckets** tab.
-3. Hover over the name of the bucket you want to rename in the list.
-4. Click **Rename**.
-5. Review the information in the window that appears and click **I understand, let's rename my bucket**.
-6. Update the bucket's name and click **Change Bucket Name**.
+2. Select **Buckets**.
+3. Click **Rename** under the bucket you want to rename.
+4. Review the information in the window that appears and click **I understand, let's rename my bucket**.
+5. Update the bucket's name and click **Change Bucket Name**.
## Update a bucket's retention policy in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Select the **Buckets** tab.
+2. Select **Buckets**.
3. Click the name of the bucket you want to update from the list.
4. In the window that appears, edit the bucket's retention policy.
5. Click **Save Changes**.
@@ -50,7 +50,7 @@ Use the [`influx bucket update` command](/v2.0/reference/cli/influx/bucket/updat
to update a bucket. Updating a bucket requires the following:
- The bucket ID _(provided in the output of `influx bucket find`)_
-- The name or ID of the organization to which the bucket belongs
+- The name or ID of the organization the bucket belongs to.
##### Update the name of a bucket
```sh
diff --git a/content/v2.0/organizations/buckets/view-buckets.md b/content/v2.0/organizations/buckets/view-buckets.md
index 5d47f5211..5dd6e91a9 100644
--- a/content/v2.0/organizations/buckets/view-buckets.md
+++ b/content/v2.0/organizations/buckets/view-buckets.md
@@ -11,18 +11,17 @@ weight: 202
## View buckets in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Select the **Buckets** tab.
+2. Select **Buckets**.
3. Click on a bucket to view details.
## View buckets using the influx CLI
Use the [`influx bucket find` command](/v2.0/reference/cli/influx/bucket/find)
-to view a buckets in an organization. Viewing bucket requires the following:
-
+to view buckets in an organization.
```sh
influx bucket find
diff --git a/content/v2.0/process-data/_index.md b/content/v2.0/process-data/_index.md
index d16a1912f..f9374b77b 100644
--- a/content/v2.0/process-data/_index.md
+++ b/content/v2.0/process-data/_index.md
@@ -7,7 +7,7 @@ description: >
menu:
v2_0:
name: Process data
-weight: 5
+weight: 4
v2.0/tags: [tasks]
---
diff --git a/content/v2.0/process-data/common-tasks/downsample-data.md b/content/v2.0/process-data/common-tasks/downsample-data.md
index 1b84563c2..1c4d83148 100644
--- a/content/v2.0/process-data/common-tasks/downsample-data.md
+++ b/content/v2.0/process-data/common-tasks/downsample-data.md
@@ -32,7 +32,7 @@ A separate bucket where aggregated, downsampled data is stored.
To downsample data, it must be aggregated in some way.
What specific method of aggregation you use depends on your specific use case,
but examples include mean, median, top, bottom, etc.
-View [Flux's aggregate functions](/v2.0/reference/flux/functions/built-in/transformations/aggregates/)
+View [Flux's aggregate functions](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/)
for more information and ideas.
## Create a destination bucket
@@ -47,7 +47,7 @@ The example task script below is a very basic form of data downsampling that doe
1. Defines a task named "cq-mem-data-1w" that runs once a week.
2. Defines a `data` variable that represents all data from the last 2 weeks in the
`mem` measurement of the `system-data` bucket.
-3. Uses the [`aggregateWindow()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow/)
+3. Uses the [`aggregateWindow()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow/)
to window the data into 1 hour intervals and calculate the average of each interval.
4. Stores the aggregated data in the `system-data-downsampled` bucket under the
`my-org` organization.
diff --git a/content/v2.0/process-data/get-started.md b/content/v2.0/process-data/get-started.md
index 45bb10c7e..ef146a4ad 100644
--- a/content/v2.0/process-data/get-started.md
+++ b/content/v2.0/process-data/get-started.md
@@ -54,8 +54,8 @@ in form fields when creating the task.
{{% /note %}}
## Define a data source
-Define a data source using Flux's [`from()` function](/v2.0/reference/flux/functions/built-in/inputs/from/)
-or any other [Flux input functions](/v2.0/reference/flux/functions/built-in/inputs/).
+Define a data source using Flux's [`from()` function](/v2.0/reference/flux/stdlib/built-in/inputs/from/)
+or any other [Flux input functions](/v2.0/reference/flux/stdlib/built-in/inputs/).
For convenience, consider creating a variable that includes the sourced data with
the required time range and any relevant filters.
@@ -88,7 +88,7 @@ specific use case.
The example below illustrates a task that downsamples data by calculating the average of set intervals.
It uses the `data` variable defined [above](#define-a-data-source) as the data source.
It then windows the data into 5 minute intervals and calculates the average of each
-window using the [`aggregateWindow()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow/).
+window using the [`aggregateWindow()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow/).
```js
data
@@ -104,7 +104,7 @@ _See [Common tasks](/v2.0/process-data/common-tasks) for examples of tasks commo
In the vast majority of task use cases, once data is transformed, it needs to sent and stored somewhere.
This could be a separate bucket with a different retention policy, another measurement, or even an alert endpoint _(Coming)_.
-The example below uses Flux's [`to()` function](/v2.0/reference/flux/functions/built-in/outputs/to)
+The example below uses Flux's [`to()` function](/v2.0/reference/flux/stdlib/built-in/outputs/to)
to send the transformed data to another bucket:
```js
diff --git a/content/v2.0/process-data/manage-tasks/create-task.md b/content/v2.0/process-data/manage-tasks/create-task.md
index c2e0753e5..270a0d1c2 100644
--- a/content/v2.0/process-data/manage-tasks/create-task.md
+++ b/content/v2.0/process-data/manage-tasks/create-task.md
@@ -8,6 +8,8 @@ menu:
name: Create a task
parent: Manage tasks
weight: 201
+related:
+ - /v2.0/reference/cli/influx/task/create
---
InfluxDB provides multiple ways to create tasks both in the InfluxDB user interface (UI)
@@ -36,9 +38,9 @@ The InfluxDB UI provides multiple ways to create a task:
3. Select the **Task** option.
4. Specify the task options. See [Task options](/v2.0/process-data/task-options)
for detailed information about each option.
-5. Click **Save as Task**.
+5. Select a token to use from the **Token** dropdown.
+6. Click **Save as Task**.
-{{< img-hd src="/img/2-0-data-explorer-save-as-task.png" title="Add a task from the Data Explorer"/>}}
### Create a task in the Task UI
1. Click on the **Tasks** icon in the left navigation menu.
@@ -49,10 +51,9 @@ The InfluxDB UI provides multiple ways to create a task:
3. Select **New Task**.
4. In the left panel, specify the task options.
See [Task options](/v2.0/process-data/task-options) for detailed information about each option.
-5. In the right panel, enter your task script.
-6. Click **Save** in the upper right.
-
-{{< img-hd src="/img/2-0-tasks-create-edit.png" title="Create a task" />}}
+5. Select a token to use from the **Token** dropdown.
+6. In the right panel, enter your task script.
+7. Click **Save** in the upper right.
### Import a task
1. Click on the **Tasks** icon in the left navigation menu.
diff --git a/content/v2.0/process-data/manage-tasks/delete-task.md b/content/v2.0/process-data/manage-tasks/delete-task.md
index f89b0981e..eab5c0a54 100644
--- a/content/v2.0/process-data/manage-tasks/delete-task.md
+++ b/content/v2.0/process-data/manage-tasks/delete-task.md
@@ -8,6 +8,8 @@ menu:
name: Delete a task
parent: Manage tasks
weight: 206
+related:
+ - /v2.0/reference/cli/influx/task/delete
---
## Delete a task in the InfluxDB UI
diff --git a/content/v2.0/process-data/manage-tasks/run-task.md b/content/v2.0/process-data/manage-tasks/run-task.md
index ec975adf7..297ca4308 100644
--- a/content/v2.0/process-data/manage-tasks/run-task.md
+++ b/content/v2.0/process-data/manage-tasks/run-task.md
@@ -8,6 +8,9 @@ menu:
name: Run a task
parent: Manage tasks
weight: 203
+related:
+ - /v2.0/reference/cli/influx/task/run
+ - /v2.0/reference/cli/influx/task/retry
---
InfluxDB data processing tasks generally run in defined intervals or at a specific time,
diff --git a/content/v2.0/process-data/manage-tasks/task-run-history.md b/content/v2.0/process-data/manage-tasks/task-run-history.md
index ee4cae300..5539fc32d 100644
--- a/content/v2.0/process-data/manage-tasks/task-run-history.md
+++ b/content/v2.0/process-data/manage-tasks/task-run-history.md
@@ -7,6 +7,8 @@ menu:
name: View run history
parent: Manage tasks
weight: 203
+related:
+ - /v2.0/reference/cli/influx/task/run/find
---
When an InfluxDB task runs, a "run" record is created in the task's history.
diff --git a/content/v2.0/process-data/manage-tasks/update-task.md b/content/v2.0/process-data/manage-tasks/update-task.md
index 3f02ebb5a..c38c25d2c 100644
--- a/content/v2.0/process-data/manage-tasks/update-task.md
+++ b/content/v2.0/process-data/manage-tasks/update-task.md
@@ -8,6 +8,8 @@ menu:
name: Update a task
parent: Manage tasks
weight: 204
+related:
+ - /v2.0/reference/cli/influx/task/update
---
## Update a task in the InfluxDB UI
@@ -15,13 +17,14 @@ To view your tasks, click the **Tasks** icon in the left navigation menu.
{{< nav-icon "tasks" >}}
+Click on the name of a task to update it.
+
#### Update a task's Flux script
1. In the list of tasks, click the **Name** of the task you want to update.
2. In the left panel, modify the task options.
3. In the right panel, modify the task script.
4. Click **Save** in the upper right.
-{{< img-hd src="/img/2-0-tasks-create-edit.png" alt="Update a task" />}}
#### Update the status of a task
In the list of tasks, click the {{< icon "toggle" >}} toggle to the left of the
diff --git a/content/v2.0/process-data/manage-tasks/view-tasks.md b/content/v2.0/process-data/manage-tasks/view-tasks.md
index 8aecb45e2..2c8c91083 100644
--- a/content/v2.0/process-data/manage-tasks/view-tasks.md
+++ b/content/v2.0/process-data/manage-tasks/view-tasks.md
@@ -8,6 +8,8 @@ menu:
name: View tasks
parent: Manage tasks
weight: 202
+related:
+ - /v2.0/reference/cli/influx/task/find
---
## View tasks in the InfluxDB UI
diff --git a/content/v2.0/query-data/execute-queries.md b/content/v2.0/query-data/execute-queries.md
index 462db6f40..b558b5dcc 100644
--- a/content/v2.0/query-data/execute-queries.md
+++ b/content/v2.0/query-data/execute-queries.md
@@ -13,10 +13,10 @@ v2.0/tags: [query]
There are multiple ways to execute queries with InfluxDB.
This guide covers the different options:
-1. [Data Explorer](#data-explorer)
-2. [Influx REPL](#influx-repl)
-3. [Influx query command](#influx-query-command)
-5. [InfluxDB API](#influxdb-api)
+- [Data Explorer](#data-explorer)
+- [Influx REPL](#influx-repl)
+- [Influx query command](#influx-query-command)
+- [InfluxDB API](#influxdb-api)
## Data Explorer
Queries can be built, executed, and visualized in InfluxDB UI's Data Explorer.
@@ -60,35 +60,50 @@ In your request, set the following:
- Your organization via the `org` or `orgID` URL parameters.
- `Authorization` header to `Token ` + your authentication token.
-- `accept` header to `application/csv`.
-- `content-type` header to `application/vnd.flux`.
+- `Accept` header to `application/csv`.
+- `Content-type` header to `application/vnd.flux`.
+- Your plain text query as the request's raw data.
-This lets you POST the Flux query in plain text and receive the annotated CSV response.
+InfluxDB returns the query results in [annotated CSV](/v2.0/reference/annotated-csv/).
+
+{{% note %}}
+#### Use gzip to compress the query response
+To compress the query response, set the `Accept-Encoding` header to `gzip`.
+This saves network bandwidth, but increases server-side load.
+{{% /note %}}
Below is an example `curl` command that queries InfluxDB:
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
-[Multi-line](#)
-[Single-line](#)
+[Without compression](#)
+[With compression](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
```bash
curl http://localhost:9999/api/v2/query?org=my-org -XPOST -sS \
--H 'Authorization: Token YOURAUTHTOKEN' \
--H 'accept:application/csv' \
--H 'content-type:application/vnd.flux' \
--d 'from(bucket:“test”)
- |> range(start:-1000h)
- |> group(columns:[“_measurement”], mode:“by”)
- |> sum()'
+ -H 'Authorization: Token YOURAUTHTOKEN' \
+ -H 'Accept: application/csv' \
+ -H 'Content-type: application/vnd.flux' \
+ -d 'from(bucket:"example-bucket")
+ |> range(start:-1000h)
+ |> group(columns:["_measurement"], mode:"by")
+ |> sum()'
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
```bash
-curl http://localhost:9999/api/v2/query?org=my-org -XPOST -sS -H 'Authorization: Token TOKENSTRINGHERE' -H 'accept:application/csv' -H 'content-type:application/vnd.flux' -d 'from(bucket:“test”) |> range(start:-1000h) |> group(columns:[“_measurement”], mode:“by”) |> sum()'
+curl http://localhost:9999/api/v2/query?org=my-org -XPOST -sS \
+ -H 'Authorization: Token YOURAUTHTOKEN' \
+ -H 'Accept: application/csv' \
+ -H 'Content-type: application/vnd.flux' \
+ -H 'Accept-Encoding: gzip' \
+ -d 'from(bucket:"example-bucket")
+ |> range(start:-1000h)
+ |> group(columns:["_measurement"], mode:"by")
+ |> sum()'
```
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
diff --git a/content/v2.0/query-data/get-started/_index.md b/content/v2.0/query-data/get-started/_index.md
index 4c85cf503..225f9fdb7 100644
--- a/content/v2.0/query-data/get-started/_index.md
+++ b/content/v2.0/query-data/get-started/_index.md
@@ -9,13 +9,16 @@ menu:
v2_0:
name: Get started with Flux
parent: Query data
+related:
+ - /v2.0/reference/flux/
+ - /v2.0/reference/flux/stdlib/
---
Flux is InfluxData's functional data scripting language designed for querying,
analyzing, and acting on data.
-This multi-part getting started guide walks through important concepts related to Flux,
-how to query time series data from InfluxDB using Flux, and introduces Flux syntax and functions.
+This multi-part getting started guide walks through important concepts related to Flux.
+It covers querying time series data from InfluxDB using Flux, and introduces Flux syntax and functions.
## Flux design principles
Flux is designed to be usable, readable, flexible, composable, testable, contributable, and shareable.
@@ -23,7 +26,7 @@ Its syntax is largely inspired by [2018's most popular scripting language](https
Javascript, and takes a functional approach to data exploration and processing.
The following example illustrates querying data stored from the last five minutes,
-filtering by the `cpu` measurement and the `cpu=cpu-usage` tag, windowing the data in 1 minute intervals,
+filtering by the `cpu` measurement and the `cpu=cpu-total` tag, windowing the data in 1 minute intervals,
and calculating the average of each window:
```js
@@ -44,6 +47,7 @@ Flux uses pipe-forward operators (`|>`) extensively to chain operations together
After each function or operation, Flux returns a table or collection of tables containing data.
The pipe-forward operator pipes those tables into the next function or operation where
they are further processed or manipulated.
+This makes it easy to chain together functions to build sophisticated queries.
### Tables
Flux structures all data in tables.
diff --git a/content/v2.0/query-data/get-started/query-influxdb.md b/content/v2.0/query-data/get-started/query-influxdb.md
index 2534cbe29..7ab874778 100644
--- a/content/v2.0/query-data/get-started/query-influxdb.md
+++ b/content/v2.0/query-data/get-started/query-influxdb.md
@@ -7,6 +7,11 @@ menu:
name: Query InfluxDB
parent: Get started with Flux
weight: 201
+related:
+ - /v2.0/query-data/guides/
+ - /v2.0/reference/flux/stdlib/built-in/inputs/from
+ - /v2.0/reference/flux/stdlib/built-in/transformations/range
+ - /v2.0/reference/flux/stdlib/built-in/transformations/filter
---
This guide walks through the basics of using Flux to query data from InfluxDB.
@@ -18,8 +23,8 @@ Every Flux query needs the following:
## 1. Define your data source
-Flux's [`from()`](/v2.0/reference/flux/functions/built-in/inputs/from) function defines an InfluxDB data source.
-It requires a [`bucket`](/v2.0/reference/flux/functions/built-in/inputs/from#bucket) parameter.
+Flux's [`from()`](/v2.0/reference/flux/stdlib/built-in/inputs/from) function defines an InfluxDB data source.
+It requires a [`bucket`](/v2.0/reference/flux/stdlib/built-in/inputs/from#bucket) parameter.
The following examples use `example-bucket` as the bucket name.
```js
@@ -31,9 +36,9 @@ Flux requires a time range when querying time series data.
"Unbounded" queries are very resource-intensive and as a protective measure,
Flux will not query the database without a specified range.
-Use the pipe-forward operator (`|>`) to pipe data from your data source into the [`range()`](/v2.0/reference/flux/functions/built-in/transformations/range)
+Use the pipe-forward operator (`|>`) to pipe data from your data source into the [`range()`](/v2.0/reference/flux/stdlib/built-in/transformations/range)
function, which specifies a time range for your query.
-It accepts two properties: `start` and `stop`.
+It accepts two parameters: `start` and `stop`.
Ranges can be **relative** using negative [durations](/v2.0/reference/flux/language/lexical-elements#duration-literals)
or **absolute** using [timestamps](/v2.0/reference/flux/language/lexical-elements#date-and-time-literals).
@@ -101,7 +106,7 @@ from(bucket:"example-bucket")
```
## 4. Yield your queried data
-Use Flux's `yield()` function to output the filtered tables as the result of the query.
+Flux's `yield()` function outputs the filtered tables as the result of the query.
```js
from(bucket:"example-bucket")
@@ -114,16 +119,17 @@ from(bucket:"example-bucket")
|> yield()
```
-{{% note %}}
-Flux automatically assume a `yield()` function at
+Flux automatically assumes a `yield()` function at
the end of each script in order to output and visualize the data.
-`yield()` is only necessary when including multiple queries in the same Flux query.
+Explicitly calling `yield()` is only necessary when including multiple queries in the same Flux query.
Each set of returned data needs to be named using the `yield()` function.
-{{% /note %}}
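+
+For example, a script with two queries might name each result set (a sketch using a
+hypothetical `example-bucket`):
+
+```js
+from(bucket:"example-bucket")
+  |> range(start: -1h)
+  |> filter(fn: (r) => r._measurement == "mem")
+  |> yield(name: "mem")
+
+from(bucket:"example-bucket")
+  |> range(start: -1h)
+  |> filter(fn: (r) => r._measurement == "cpu")
+  |> yield(name: "cpu")
+```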
## Congratulations!
You have now queried data from InfluxDB using Flux.
-This is a barebones query that can be transformed in other ways.
+
+The query shown here is a barebones example.
+Flux queries can be extended in many ways to form powerful scripts.
+
Get started with Flux
diff --git a/content/v2.0/query-data/get-started/syntax-basics.md b/content/v2.0/query-data/get-started/syntax-basics.md
index a4c58c9f4..32a6e1023 100644
--- a/content/v2.0/query-data/get-started/syntax-basics.md
+++ b/content/v2.0/query-data/get-started/syntax-basics.md
@@ -7,6 +7,8 @@ menu:
name: Syntax basics
parent: Get started with Flux
weight: 203
+related:
+ - /v2.0/reference/flux/language/types/
---
@@ -184,10 +186,8 @@ topN = (tables=<-, n) => tables |> sort(desc: true) |> limit(n: n)
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
-_More information about creating custom functions is available in the [Custom functions](/v2.0/query-data/guides/custom-functions) documentation._
-
-Using the `cpuUsageUser` data stream variable defined above, find the top five data
-points with the custom `topN` function and yield the results.
+Using this new custom function `topN` and the `cpuUsageUser` data stream variable defined above,
+we can find the top five data points and yield the results.
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
@@ -213,6 +213,8 @@ cpuUsageUser |> topN(n:5) |> yield()
This query will return the five data points with the highest user CPU usage over the last hour.
+_More information about creating custom functions is available in the [Custom functions](/v2.0/query-data/guides/custom-functions) documentation._
+
diff --git a/content/v2.0/query-data/get-started/transform-data.md b/content/v2.0/query-data/get-started/transform-data.md
index 5dff2d320..76328698c 100644
--- a/content/v2.0/query-data/get-started/transform-data.md
+++ b/content/v2.0/query-data/get-started/transform-data.md
@@ -7,15 +7,19 @@ menu:
name: Transform data
parent: Get started with Flux
weight: 202
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow
+ - /v2.0/reference/flux/stdlib/built-in/transformations/window
---
When [querying data from InfluxDB](/v2.0/query-data/get-started/query-influxdb),
you often need to transform that data in some way.
Common examples are aggregating data into averages, downsampling data, etc.
-This guide demonstrates using [Flux functions](/v2.0/reference/flux/functions) to transform your data.
+This guide demonstrates using [Flux functions](/v2.0/reference/flux/stdlib) to transform your data.
It walks through creating a Flux script that partitions data into windows of time,
averages the `_value`s in each window, and outputs the averages as a new table.
+(Remember, Flux structures all data in [tables](/v2.0/query-data/get-started/#tables).)
It's important to understand how the "shape" of your data changes through each of these operations.
@@ -36,13 +40,13 @@ from(bucket:"example-bucket")
## Flux functions
Flux provides a number of functions that perform specific operations, transformations, and tasks.
You can also [create custom functions](/v2.0/query-data/guides/custom-functions) in your Flux queries.
-_Functions are covered in detail in the [Flux functions](/v2.0/reference/flux/functions) documentation._
+_Functions are covered in detail in the [Flux functions](/v2.0/reference/flux/stdlib) documentation._
A common type of function used when transforming data queried from InfluxDB is an aggregate function.
Aggregate functions take a set of `_value`s in a table, aggregate them, and transform
them into a new value.
-This example uses the [`mean()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean)
+This example uses the [`mean()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean)
to average values within each time window.
{{% note %}}
@@ -52,7 +56,7 @@ It's just good to understand the steps in the process.
{{% /note %}}
## Window your data
-Flux's [`window()` function](/v2.0/reference/flux/functions/built-in/transformations/window) partitions records based on a time value.
+Flux's [`window()` function](/v2.0/reference/flux/stdlib/built-in/transformations/window) partitions records based on a time value.
Use the `every` parameter to define a duration of each window.
For this example, window data in five minute intervals (`5m`).
@@ -75,7 +79,7 @@ When visualized, each table is assigned a unique color.
## Aggregate windowed data
Flux aggregate functions take the `_value`s in each table and aggregate them in some way.
-Use the [`mean()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean) to average the `_value`s of each table.
+Use the [`mean()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean) to average the `_value`s of each table.
```js
from(bucket:"example-bucket")
@@ -101,7 +105,7 @@ Aggregate functions don't infer what time should be used for the aggregate value
Therefore the `_time` column is dropped.
A `_time` column is required in the [next operation](#unwindow-aggregate-tables).
-To add one, use the [`duplicate()` function](/v2.0/reference/flux/functions/built-in/transformations/duplicate)
+To add one, use the [`duplicate()` function](/v2.0/reference/flux/stdlib/built-in/transformations/duplicate)
to duplicate the `_stop` column as the `_time` column for each windowed table.
```js
@@ -146,7 +150,7 @@ process helps to understand how data changes "shape" as it is passed through eac
Flux provides (and allows you to create) "helper" functions that abstract many of these steps.
The same operation performed in this guide can be accomplished using the
-[`aggregateWindow()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow).
+[`aggregateWindow()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow).
```js
from(bucket:"example-bucket")
diff --git a/content/v2.0/query-data/guides/conditional-logic.md b/content/v2.0/query-data/guides/conditional-logic.md
index 857797ee4..22c7494c0 100644
--- a/content/v2.0/query-data/guides/conditional-logic.md
+++ b/content/v2.0/query-data/guides/conditional-logic.md
@@ -27,9 +27,9 @@ Conditional expressions are most useful in the following contexts:
- When defining variables.
- When using functions that operate on a single row at a time (
- [`filter()`](/v2.0/reference/flux/functions/built-in/transformations/filter/),
- [`map()`](/v2.0/reference/flux/functions/built-in/transformations/map/),
- [`reduce()`](/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce) ).
+ [`filter()`](/v2.0/reference/flux/stdlib/built-in/transformations/filter/),
+ [`map()`](/v2.0/reference/flux/stdlib/built-in/transformations/map/),
+ [`reduce()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce) ).
## Examples
@@ -72,7 +72,7 @@ from(bucket: "example-bucket")
### Conditionally transform column values with map()
-The following example uses the [`map()` function](/v2.0/reference/flux/functions/built-in/transformations/map/)
+The following example uses the [`map()` function](/v2.0/reference/flux/stdlib/built-in/transformations/map/)
to conditionally transform column values.
It sets the `level` column to a specific string based on `_value` column.
@@ -87,8 +87,7 @@ from(bucket: "example-bucket")
|> range(start: -5m)
|> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent" )
|> map(fn: (r) => ({
- _time: r._time,
- _value: r._value,
+ r with
level:
if r._value >= 95.0000001 and r._value <= 100.0 then "critical"
else if r._value >= 85.0000001 and r._value <= 95.0 then "warning"
@@ -104,10 +103,8 @@ from(bucket: "example-bucket")
|> range(start: -5m)
|> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent" )
|> map(fn: (r) => ({
- // Retain the _time column in the mapped row
- _time: r._time,
- // Retain the _value column in the mapped row
- _value: r._value,
+ // Retain all existing columns in the mapped row
+ r with
// Set the level column value based on the _value column
level:
if r._value >= 95.0000001 and r._value <= 100.0 then "critical"
@@ -122,8 +119,8 @@ from(bucket: "example-bucket")
{{< /code-tabs-wrapper >}}
### Conditionally increment a count with reduce()
-The following example uses the [`aggregateWindow()`](/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow/)
-and [`reduce()`](/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce/)
+The following example uses the [`aggregateWindow()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow/)
+and [`reduce()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/)
functions to count the number of records in every five minute window that exceed a defined threshold.
{{< code-tabs-wrapper >}}
diff --git a/content/v2.0/query-data/guides/custom-functions/_index.md b/content/v2.0/query-data/guides/custom-functions/_index.md
index d72154420..d15716bcd 100644
--- a/content/v2.0/query-data/guides/custom-functions/_index.md
+++ b/content/v2.0/query-data/guides/custom-functions/_index.md
@@ -70,14 +70,14 @@ functionName = (tables=<-) => tables |> functionOperations
###### Multiply row values by x
The example below defines a `multByX` function that multiplies the `_value` column
of each row in the input table by the `x` parameter.
-It uses the [`map()` function](/v2.0/reference/flux/functions/built-in/transformations/map)
+It uses the [`map()` function](/v2.0/reference/flux/stdlib/built-in/transformations/map)
to modify each `_value`.
```js
// Function definition
multByX = (tables=<-, x) =>
tables
- |> map(fn: (r) => r._value * x)
+ |> map(fn: (r) => ({ r with _value: r._value * x}))
// Function usage
from(bucket: "example-bucket")
@@ -104,9 +104,9 @@ Defaults are overridden by explicitly defining the parameter in the function cal
###### Get the winner or the "winner"
The example below defines a `getWinner` function that returns the record with the highest
or lowest `_value` (winner versus "winner") depending on the `noSarcasm` parameter which defaults to `true`.
-It uses the [`sort()` function](/v2.0/reference/flux/functions/built-in/transformations/sort)
+It uses the [`sort()` function](/v2.0/reference/flux/stdlib/built-in/transformations/sort)
to sort records in either descending or ascending order.
-It then uses the [`limit()` function](/v2.0/reference/flux/functions/built-in/transformations/limit)
+It then uses the [`limit()` function](/v2.0/reference/flux/stdlib/built-in/transformations/limit)
to return the first record from the sorted table.
```js
diff --git a/content/v2.0/query-data/guides/custom-functions/custom-aggregate.md b/content/v2.0/query-data/guides/custom-functions/custom-aggregate.md
index 61ac57bbd..b56d68dfc 100644
--- a/content/v2.0/query-data/guides/custom-functions/custom-aggregate.md
+++ b/content/v2.0/query-data/guides/custom-functions/custom-aggregate.md
@@ -10,9 +10,9 @@ weight: 301
---
To aggregate your data, use the Flux
-[built-in aggregate functions](/v2.0/reference/flux/functions/built-in/transformations/aggregates/)
+[built-in aggregate functions](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/)
or create custom aggregate functions using the
-[`reduce()`function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce/).
+[`reduce()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/).
## Aggregate function characteristics
Aggregate functions all have the same basic characteristics:
@@ -22,7 +22,7 @@ Aggregate functions all have the same basic characteristics:
## How reduce() works
The `reduce()` function operates on one row at a time using the function defined in
-the [`fn` parameter](/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce/#fn).
+the [`fn` parameter](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/#fn).
The `fn` function maps keys to specific values using two [objects](/v2.0/query-data/get-started/syntax-basics/#objects)
specified by the following parameters:
@@ -32,7 +32,7 @@ specified by the following parameters:
| `accumulator` | An object that contains values used in each row's aggregate calculation. |
{{% note %}}
-The `reduce()` function's [`identity` parameter](/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce/#identity)
+The `reduce()` function's [`identity` parameter](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/#identity)
defines the initial `accumulator` object.
{{% /note %}}
@@ -49,6 +49,11 @@ in an input table.
)
```
+{{% note %}}
+To preserve existing columns, [use the `with` operator](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/#preserve-columns)
+when mapping values in the `r` object.
+{{% /note %}}
+
To illustrate how this function works, take this simplified table for example:
```txt
@@ -145,7 +150,7 @@ and the `reduce()` function to aggregate rows in each input table.
### Create a custom average function
This example illustrates how to create a function that averages values in a table.
_This is meant for demonstration purposes only.
-The built-in [`mean()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean/)
+The built-in [`mean()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean/)
does the same thing and is much more performant._
{{< code-tabs-wrapper >}}
diff --git a/content/v2.0/query-data/guides/exists.md b/content/v2.0/query-data/guides/exists.md
new file mode 100644
index 000000000..15b172bb9
--- /dev/null
+++ b/content/v2.0/query-data/guides/exists.md
@@ -0,0 +1,69 @@
+---
+title: Check if a value exists
+seotitle: Use Flux to check if a value exists
+description: >
+ Use the Flux `exists` operator to check if an object contains a key or if that
+ key's value is `null`.
+v2.0/tags: [exists]
+menu:
+ v2_0:
+ name: Check if a value exists
+ parent: How-to guides
+weight: 209
+---
+
+Use the Flux `exists` operator to check if an object contains a key or if that
+key's value is `null`.
+
+```js
+p = {firstName: "John", lastName: "Doe", age: 42}
+
+exists p.firstName
+// Returns true
+
+exists p.height
+// Returns false
+```
+
+Use `exists` with row functions (
+[`filter()`](/v2.0/reference/flux/stdlib/built-in/transformations/filter/),
+[`map()`](/v2.0/reference/flux/stdlib/built-in/transformations/map/),
+[`reduce()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce/))
+to check if a row includes a column or if the value for that column is `null`.
+
+#### Filter out null values
+```js
+from(bucket: "example-bucket")
+ |> range(start: -5m)
+ |> filter(fn: (r) => exists r._value)
+```
+
+#### Map values based on existence
+```js
+from(bucket: "example-bucket")
+ |> range(start: -30s)
+ |> map(fn: (r) => ({
+ r with
+ human_readable:
+ if exists r._value then "${r._field} is ${string(v:r._value)}."
+ else "${r._field} has no value."
+ }))
+```
+
+#### Ignore null values in a custom aggregate function
+```js
+customSumProduct = (tables=<-) =>
+ tables
+ |> reduce(
+ identity: {sum: 0.0, product: 1.0},
+ fn: (r, accumulator) => ({
+ r with
+ sum:
+ if exists r._value then r._value + accumulator.sum
+ else accumulator.sum,
+ product:
+        if exists r._value then r._value * accumulator.product
+ else accumulator.product
+ })
+ )
+```
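+
+A hypothetical usage sketch of the function above (bucket, measurement, and field are illustrative):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -5m)
+  |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+  |> customSumProduct()
+```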
diff --git a/content/v2.0/query-data/guides/group-data.md b/content/v2.0/query-data/guides/group-data.md
index 50a3c4fde..fd3c56eb9 100644
--- a/content/v2.0/query-data/guides/group-data.md
+++ b/content/v2.0/query-data/guides/group-data.md
@@ -28,7 +28,7 @@ Understanding how modifying group keys shapes output data is key to successfully
grouping and transforming data into your desired output.
## group() Function
-Flux's [`group()` function](/v2.0/reference/flux/functions/built-in/transformations/group) defines the
+Flux's [`group()` function](/v2.0/reference/flux/stdlib/built-in/transformations/group) defines the
group key for output tables, i.e. grouping records based on values for specific columns.
###### group() example
diff --git a/content/v2.0/query-data/guides/histograms.md b/content/v2.0/query-data/guides/histograms.md
index 8baa0cce2..4253e8183 100644
--- a/content/v2.0/query-data/guides/histograms.md
+++ b/content/v2.0/query-data/guides/histograms.md
@@ -6,7 +6,7 @@ menu:
v2_0:
name: Create histograms
parent: How-to guides
-weight: 207
+weight: 208
---
@@ -14,7 +14,7 @@ Histograms provide valuable insight into the distribution of your data.
This guide walks through using Flux's `histogram()` function to transform your data into a **cumulative histogram**.
## histogram() function
-The [`histogram()` function](/v2.0/reference/flux/functions/built-in/transformations/histogram) approximates the
+The [`histogram()` function](/v2.0/reference/flux/stdlib/built-in/transformations/histogram) approximates the
cumulative distribution of a dataset by counting data frequencies for a list of "bins."
A **bin** is simply a range in which a data point falls.
All data points that are less than or equal to the bound are counted in the bin.
@@ -41,7 +41,7 @@ Flux provides two helper functions for generating histogram bins.
Each generates an array of floats designed to be used in the `histogram()` function's `bins` parameter.
### linearBins()
-The [`linearBins()` function](/v2.0/reference/flux/functions/built-in/misc/linearbins) generates a list of linearly separated floats.
+The [`linearBins()` function](/v2.0/reference/flux/stdlib/built-in/misc/linearbins) generates a list of linearly separated floats.
```js
linearBins(start: 0.0, width: 10.0, count: 10)
@@ -50,17 +50,36 @@ linearBins(start: 0.0, width: 10.0, count: 10)
```
### logarithmicBins()
-The [`logarithmicBins()` function](/v2.0/reference/flux/functions/built-in/misc/logarithmicbins) generates a list of exponentially separated floats.
+The [`logarithmicBins()` function](/v2.0/reference/flux/stdlib/built-in/misc/logarithmicbins) generates a list of exponentially separated floats.
```js
-logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinty: true)
+logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinity: true)
// Generated list: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, +Inf]
```
+## Histogram visualization
+The [Histogram visualization type](/v2.0/visualize-data/visualization-types/histogram/)
+automatically converts query results into a binned and segmented histogram.
+
+{{< img-hd src="/img/2-0-visualizations-histogram-example.png" alt="Histogram visualization" />}}
+
+Use the [Histogram visualization controls](/v2.0/visualize-data/visualization-types/histogram/#histogram-controls)
+to specify the number of bins and define groups in bins.
+
+### Histogram visualization data structure
+Because the Histogram visualization uses visualization controls to create bins and groups,
+**do not** structure query results as histogram data.
+
+{{% note %}}
+Output of the [`histogram()` function](#histogram-function) is **not** compatible
+with the Histogram visualization type.
+View the example [below](#visualize-errors-by-severity).
+{{% /note %}}
+
## Examples
-### Generating a histogram with linear bins
+### Generate a histogram with linear bins
```js
from(bucket:"example-bucket")
|> range(start: -5m)
@@ -105,7 +124,7 @@ Table: keys: [_start, _stop, _field, _measurement, host]
2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 75 30
```
-### Generating a histogram with logarithmic bins
+### Generate a histogram with logarithmic bins
```js
from(bucket:"example-bucket")
|> range(start: -5m)
@@ -139,3 +158,22 @@ Table: keys: [_start, _stop, _field, _measurement, host]
2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 128 30
2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 256 30
```
+
+### Visualize errors by severity
+Use the [Telegraf Syslog plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/syslog)
+to collect error information from your system.
+Query the `severity_code` field in the `syslog` measurement:
+
+```js
+from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "syslog" and
+ r._field == "severity_code"
+ )
+```
+
+In the Histogram visualization options, select `_time` as the **X Column**
+and `severity` as the **Group By** option:
+
+{{< img-hd src="/img/2-0-visualizations-histogram-errors.png" alt="Logs by severity histogram" />}}
diff --git a/content/v2.0/query-data/guides/join.md b/content/v2.0/query-data/guides/join.md
index 002968c3b..e0932ba3c 100644
--- a/content/v2.0/query-data/guides/join.md
+++ b/content/v2.0/query-data/guides/join.md
@@ -10,7 +10,7 @@ menu:
weight: 205
---
-The [`join()` function](/v2.0/reference/flux/functions/built-in/transformations/join) merges two or more
+The [`join()` function](/v2.0/reference/flux/stdlib/built-in/transformations/join) merges two or more
input streams, whose values are equal on a set of common columns, into a single output stream.
Flux allows you to join on any columns common between two data streams and opens the door
for operations such as cross-measurement joins and math across measurements.
@@ -205,7 +205,7 @@ These represent the columns with values unique to the two input tables.
## Calculate and create a new table
With the two streams of data joined into a single table, use the
-[`map()` function](/v2.0/reference/flux/functions/built-in/transformations/map)
+[`map()` function](/v2.0/reference/flux/stdlib/built-in/transformations/map)
to build a new table by mapping the existing `_time` column to a new `_time`
column and dividing `_value_mem` by `_value_proc` and mapping it to a
new `_value` column.
@@ -213,9 +213,10 @@ new `_value` column.
```js
join(tables: {mem:memUsed, proc:procTotal}, on: ["_time", "_stop", "_start", "host"])
|> map(fn: (r) => ({
- _time: r._time,
- _value: r._value_mem / r._value_proc
- }))
+ _time: r._time,
+ _value: r._value_mem / r._value_proc
+ })
+ )
```
{{% truncate %}}
diff --git a/content/v2.0/query-data/guides/manipulate-timestamps.md b/content/v2.0/query-data/guides/manipulate-timestamps.md
new file mode 100644
index 000000000..47c3269be
--- /dev/null
+++ b/content/v2.0/query-data/guides/manipulate-timestamps.md
@@ -0,0 +1,108 @@
+---
+title: Manipulate timestamps with Flux
+description: >
+ Use Flux to process and manipulate timestamps.
+menu:
+ v2_0:
+ name: Manipulate timestamps
+ parent: How-to guides
+weight: 209
+---
+
+Every point stored in InfluxDB has an associated timestamp.
+Use Flux to process and manipulate timestamps to suit your needs.
+
+- [Convert timestamp format](#convert-timestamp-format)
+- [Time-related Flux functions](#time-related-flux-functions)
+
+## Convert timestamp format
+
+### Convert nanosecond epoch timestamp to RFC3339
+Use the [`time()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/time/)
+to convert a **nanosecond** epoch timestamp to an RFC3339 timestamp.
+
+```js
+time(v: 1568808000000000000)
+// Returns 2019-09-18T12:00:00.000000000Z
+```
+
+### Convert RFC3339 to nanosecond epoch timestamp
+Use the [`uint()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/uint/)
+to convert an RFC3339 timestamp to a nanosecond epoch timestamp.
+
+```js
+uint(v: 2019-09-18T12:00:00.000000000Z)
+// Returns 1568808000000000000
+```
+
+### Calculate the duration between two timestamps
+Flux doesn't support mathematical operations using [time type](/v2.0/reference/flux/language/types/#time-types) values.
+To calculate the duration between two timestamps:
+
+1. Use the `uint()` function to convert each timestamp to a nanosecond epoch timestamp.
+2. Subtract one nanosecond epoch timestamp from the other.
+3. Use the `duration()` function to convert the result into a duration.
+
+```js
+time1 = uint(v: 2019-09-17T21:12:05Z)
+time2 = uint(v: 2019-09-18T22:16:35Z)
+
+duration(v: time2 - time1)
+// Returns 25h4m30s
+```
+
+{{% note %}}
+Flux doesn't support duration column types.
+To store a duration in a column, use the [`string()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string/)
+to convert the duration to a string.
+{{% /note %}}
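+
+For example, building on `time1` and `time2` above, a minimal sketch that converts the computed duration to a string:
+
+```js
+string(v: duration(v: time2 - time1))
+// Returns "25h4m30s"
+```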
+
+## Time-related Flux functions
+
+### Retrieve the current time
+Use the [`now()` function](/v2.0/reference/flux/stdlib/built-in/misc/now/) to
+return the current UTC time in RFC3339 format.
+
+```js
+now()
+```
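+
+For example, a minimal sketch that combines `now()` with `uint()` and `duration()` to calculate how long ago an (illustrative) timestamp occurred:
+
+```js
+duration(v: uint(v: now()) - uint(v: 2019-09-18T12:00:00Z))
+```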
+
+### Add a duration to a timestamp
+The [`experimental.addDuration()` function](/v2.0/reference/flux/stdlib/experimental/addduration/)
+adds a duration to a specified time and returns the resulting time.
+
+{{% warn %}}
+By using `experimental.addDuration()`, you accept the
+[risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.addDuration(
+ d: 6h,
+ to: 2019-09-16T12:00:00Z,
+)
+
+// Returns 2019-09-16T18:00:00.000000000Z
+```
+
+### Subtract a duration from a timestamp
+The [`experimental.subDuration()` function](/v2.0/reference/flux/stdlib/experimental/subduration/)
+subtracts a duration from a specified time and returns the resulting time.
+
+{{% warn %}}
+By using `experimental.subDuration()`, you accept the
+[risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.subDuration(
+ d: 6h,
+ from: 2019-09-16T12:00:00Z,
+)
+
+// Returns 2019-09-16T06:00:00.000000000Z
+```
diff --git a/content/v2.0/query-data/guides/mathematic-operations.md b/content/v2.0/query-data/guides/mathematic-operations.md
index bafa56fb0..39cbb7380 100644
--- a/content/v2.0/query-data/guides/mathematic-operations.md
+++ b/content/v2.0/query-data/guides/mathematic-operations.md
@@ -40,7 +40,7 @@ Otherwise, you will get an error similar to:
Error: type error: float != int
```
-To convert operands to the same type, use [type-conversion functions](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/)
+To convert operands to the same type, use [type-conversion functions](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/)
or manually format operands.
The operand data type determines the output data type.
For example:
@@ -82,21 +82,21 @@ percent(sample: 20.0, total: 80.0)
To transform multiple values in an input stream, your function needs to:
- [Handle piped-forward data](/v2.0/query-data/guides/custom-functions/#functions-that-manipulate-piped-forward-data).
-- Use the [`map()` function](/v2.0/reference/flux/functions/built-in/transformations/map) to iterate over each row.
+- Use the [`map()` function](/v2.0/reference/flux/stdlib/built-in/transformations/map) to iterate over each row.
The example `multiplyByX()` function below includes:
- A `tables` parameter that represents the input data stream (`<-`).
- An `x` parameter which is the number by which values in the `_value` column are multiplied.
- A `map()` function that iterates over each row in the input stream.
- It uses the `_time` value of the input stream to define the `_time` value in the output stream.
+ It uses the `with` operator to preserve existing columns in each row.
It also multiples the `_value` column by `x`.
```js
multiplyByX = (x, tables=<-) =>
tables
|> map(fn: (r) => ({
- _time: r._time,
+ r with
_value: r._value * x
})
)
@@ -115,17 +115,17 @@ The `map()` function iterates over each row in the piped-forward data and define
a new `_value` by dividing the original `_value` by 1073741824.
```js
-from(bucket: "default")
+from(bucket: "example-bucket")
|> range(start: -10m)
|> filter(fn: (r) =>
r._measurement == "mem" and
r._field == "active"
)
|> map(fn: (r) => ({
- _time: r._time,
- _value: r._value / 1073741824
- })
- )
+ r with
+ _value: r._value / 1073741824
+ })
+ )
```
You could turn that same calculation into a function:
@@ -134,7 +134,7 @@ You could turn that same calculation into a function:
bytesToGB = (tables=<-) =>
tables
|> map(fn: (r) => ({
- _time: r._time,
+ r with
_value: r._value / 1073741824
})
)
@@ -146,14 +146,14 @@ data
#### Include partial gigabytes
Because the original metric (bytes) is an integer, the output of the operation is an integer and does not include partial GBs.
To calculate partial GBs, convert the `_value` column and its values to floats using the
-[`float()` function](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/float)
+[`float()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/float)
and format the denominator in the division operation as a float.
```js
bytesToGB = (tables=<-) =>
tables
|> map(fn: (r) => ({
- _time: r._time,
+ r with
_value: float(v: r._value) / 1073741824.0
})
)
@@ -195,7 +195,7 @@ usageToFloat = (tables=<-) =>
// Define the data source and filter user and system CPU usage
// from 'cpu-total' in the 'cpu' measurement
-from(bucket: "default")
+from(bucket: "example-bucket")
|> range(start: -1h)
|> filter(fn: (r) =>
r._measurement == "cpu" and
@@ -213,7 +213,8 @@ from(bucket: "default")
// Map over each row and calculate the percentage of
// CPU used by the user vs the system
|> map(fn: (r) => ({
- _time: r._time,
+ // Preserve existing columns in each row
+ r with
usage_user: r.usage_user / (r.usage_user + r.usage_system) * 100.0,
usage_system: r.usage_system / (r.usage_user + r.usage_system) * 100.0
})
@@ -232,7 +233,7 @@ usageToFloat = (tables=<-) =>
})
)
-from(bucket: "default")
+from(bucket: "example-bucket")
|> range(start: timeRangeStart, stop: timeRangeStop)
|> filter(fn: (r) =>
r._measurement == "cpu" and
@@ -243,7 +244,7 @@ from(bucket: "default")
|> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")
|> usageToFloat()
|> map(fn: (r) => ({
- _time: r._time,
+ r with
usage_user: r.usage_user / (r.usage_user + r.usage_system) * 100.0,
usage_system: r.usage_system / (r.usage_user + r.usage_system) * 100.0
})
diff --git a/content/v2.0/query-data/guides/sort-limit.md b/content/v2.0/query-data/guides/sort-limit.md
index 4dd175588..5d053a0ad 100644
--- a/content/v2.0/query-data/guides/sort-limit.md
+++ b/content/v2.0/query-data/guides/sort-limit.md
@@ -12,7 +12,7 @@ menu:
weight: 206
---
-The [`sort()`function](/v2.0/reference/flux/functions/built-in/transformations/sort)
+The [`sort()`function](/v2.0/reference/flux/stdlib/built-in/transformations/sort)
orders the records within each table.
The following example orders system uptime first by region, then host, then value.
@@ -26,7 +26,7 @@ from(bucket:"example-bucket")
|> sort(columns:["region", "host", "_value"])
```
-The [`limit()` function](/v2.0/reference/flux/functions/built-in/transformations/limit)
+The [`limit()` function](/v2.0/reference/flux/stdlib/built-in/transformations/limit)
limits the number of records in output tables to a fixed number, `n`.
The following example shows up to 10 records from the past hour.
@@ -52,6 +52,6 @@ from(bucket:"example-bucket")
```
You now have created a Flux query that sorts and limits data.
-Flux also provides the [`top()`](/v2.0/reference/flux/functions/built-in/transformations/selectors/top)
-and [`bottom()`](/v2.0/reference/flux/functions/built-in/transformations/selectors/bottom)
+Flux also provides the [`top()`](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/top)
+and [`bottom()`](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/bottom)
functions to perform both of these operations at the same time.
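+
+For example, a minimal sketch using `top()` (measurement and field are illustrative):
+
+```js
+from(bucket:"example-bucket")
+  |> range(start: -1h)
+  |> filter(fn: (r) => r._measurement == "system" and r._field == "uptime")
+  |> top(n: 10)
+```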
diff --git a/content/v2.0/query-data/guides/sql.md b/content/v2.0/query-data/guides/sql.md
new file mode 100644
index 000000000..76845d1f3
--- /dev/null
+++ b/content/v2.0/query-data/guides/sql.md
@@ -0,0 +1,217 @@
+---
+title: Query SQL data sources
+seotitle: Query SQL data sources with InfluxDB
+description: >
+ The Flux `sql` package provides functions for working with SQL data sources.
+  Use `sql.from()` to query SQL databases like PostgreSQL and MySQL.
+v2.0/tags: [query, flux, sql]
+menu:
+ v2_0:
+ parent: How-to guides
+weight: 207
+---
+
+The [Flux](/v2.0/reference/flux) `sql` package provides functions for working with SQL data sources.
+[`sql.from()`](/v2.0/reference/flux/stdlib/sql/from/) lets you query SQL data sources
+like [PostgreSQL](https://www.postgresql.org/) and [MySQL](https://www.mysql.com/)
+and use the results with InfluxDB dashboards, tasks, and other operations.
+
+- [Query a SQL data source](#query-a-sql-data-source)
+- [Join SQL data with data in InfluxDB](#join-sql-data-with-data-in-influxdb)
+- [Use SQL results to populate dashboard variables](#use-sql-results-to-populate-dashboard-variables)
+- [Sample sensor data](#sample-sensor-data)
+
+## Query a SQL data source
+To query a SQL data source:
+
+1. Import the `sql` package in your Flux query
+2. Use the `sql.from()` function to specify the driver, data source name (DSN),
+ and query used to query data from your SQL data source:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[PostgreSQL](#)
+[MySQL](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```js
+import "sql"
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://user:password@localhost",
+ query: "SELECT * FROM example_table"
+)
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```js
+import "sql"
+
+sql.from(
+ driverName: "mysql",
+ dataSourceName: "user:password@tcp(localhost:3306)/db",
+ query: "SELECT * FROM example_table"
+)
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+_See the [`sql.from()` documentation](/v2.0/reference/flux/stdlib/sql/from/) for
+information about required function parameters._
+
+## Join SQL data with data in InfluxDB
+One of the primary benefits of querying SQL data sources from InfluxDB
+is the ability to enrich query results with data stored outside of InfluxDB.
+
+Using the [air sensor sample data](#sample-sensor-data) below, the following query
+joins air sensor metrics stored in InfluxDB with sensor information stored in PostgreSQL.
+The joined data lets you query and filter results based on sensor information
+that isn't stored in InfluxDB.
+
+```js
+// Import the "sql" package
+import "sql"
+
+// Query data from PostgreSQL
+sensorInfo = sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://localhost?sslmode=disable",
+ query: "SELECT * FROM sensors"
+)
+
+// Query data from InfluxDB
+sensorMetrics = from(bucket: "example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) => r._measurement == "airSensors")
+
+// Join InfluxDB query results with PostgreSQL query results
+join(tables: {metric: sensorMetrics, info: sensorInfo}, on: ["sensor_id"])
+```
+
+## Use SQL results to populate dashboard variables
+Use `sql.from()` to [create dashboard variables](/v2.0/visualize-data/variables/create-variable/)
+from SQL query results.
+The following example uses the [air sensor sample data](#sample-sensor-data) below to
+create a variable that lets you select the location of a sensor.
+
+```js
+import "sql"
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://localhost?sslmode=disable",
+ query: "SELECT * FROM sensors"
+ )
+ |> rename(columns: {location: "_value"})
+ |> keep(columns: ["_value"])
+```
+
+Use the variable to manipulate queries in your dashboards.
+
+{{< img-hd src="/img/2-0-sql-dashboard-variable.png" alt="Dashboard variable from SQL query results" />}}
+
+---
+
+## Sample sensor data
+The [sample data generator](#download-and-run-the-sample-data-generator) and
+[sample sensor information](#import-the-sample-sensor-information) simulate a
+group of sensors that measure temperature, humidity, and carbon monoxide
+in rooms throughout a building.
+Each collected data point is stored in InfluxDB with a `sensor_id` tag that identifies
+the specific sensor it came from.
+Sample sensor information is stored in PostgreSQL.
+
+**Sample data includes:**
+
+- Simulated data collected from each sensor and stored in the `airSensors` measurement in **InfluxDB**:
+ - temperature
+ - humidity
+ - co
+
+- Information about each sensor stored in the `sensors` table in **PostgreSQL**:
+ - sensor_id
+ - location
+ - model_number
+ - last_inspected
+
+### Import and generate sample sensor data
+
+#### Download and run the sample data generator
+`air-sensor-data.rb` is a script that generates air sensor data and stores the data in InfluxDB.
+To use `air-sensor-data.rb`:
+
+1. [Create a bucket](/v2.0/organizations/buckets/create-bucket/) to store the data.
+2. Download the sample data generator. _This tool requires [Ruby](https://www.ruby-lang.org/en/)._
+
+ Download Air Sensor Generator
+
+3. Give `air-sensor-data.rb` executable permissions:
+
+ ```
+ chmod +x air-sensor-data.rb
+ ```
+
+4. Start the generator. Specify your organization, bucket, and authorization token.
+ _For information about retrieving your token, see [View tokens](/v2.0/security/tokens/view-tokens/)._
+
+ ```
+ ./air-sensor-data.rb -o your-org -b your-bucket -t YOURAUTHTOKEN
+ ```
+
+ The generator begins to write data to InfluxDB and will continue until stopped.
+ Use `ctrl-c` to stop the generator.
+
+ _**Note:** Use the `--help` flag to view other configuration options._
+
+
+5. [Query your target bucket](/v2.0/query-data/execute-queries/) to ensure the
+ generated data is writing successfully.
+ The generator doesn't catch errors from write requests, so it will continue running
+ even if data is not writing to InfluxDB successfully.
+
+ ```
+ from(bucket: "example-bucket")
+ |> range(start: -1m)
+ |> filter(fn: (r) => r._measurement == "airSensors")
+ ```
+
+#### Import the sample sensor information
+1. [Download and install PostgreSQL](https://www.postgresql.org/download/).
+2. Download the sample sensor information CSV.
+
+ Download Sample Data
+
+3. Use a PostgreSQL client (`psql` or a GUI) to create the `sensors` table:
+
+ ```
+ CREATE TABLE sensors (
+ sensor_id character varying(50),
+ location character varying(50),
+ model_number character varying(50),
+ last_inspected date
+ );
+ ```
+
+4. Import the downloaded CSV sample data.
+ _Update the `FROM` file path to the path of the downloaded CSV sample data._
+
+ ```
+ COPY sensors(sensor_id,location,model_number,last_inspected)
+ FROM '/path/to/sample-sensor-info.csv' DELIMITER ',' CSV HEADER;
+ ```
+
+5. Query the table to ensure the data was imported correctly:
+
+ ```
+ SELECT * FROM sensors;
+ ```
+
+#### Import the sample data dashboard
+Download and import the Air Sensors dashboard to visualize the generated data:
+
+Download Air Sensors dashboard
+
+_For information about importing a dashboard, see [Create a dashboard](/v2.0/visualize-data/dashboards/create-dashboard/#create-a-new-dashboard)._
diff --git a/content/v2.0/query-data/guides/window-aggregate.md b/content/v2.0/query-data/guides/window-aggregate.md
index b47759063..d9320d394 100644
--- a/content/v2.0/query-data/guides/window-aggregate.md
+++ b/content/v2.0/query-data/guides/window-aggregate.md
@@ -86,7 +86,7 @@ Table: keys: [_start, _stop, _field, _measurement]
{{% /truncate %}}
## Windowing data
-Use the [`window()` function](/v2.0/reference/flux/functions/built-in/transformations/window)
+Use the [`window()` function](/v2.0/reference/flux/stdlib/built-in/transformations/window)
to group your data based on time bounds.
The most common parameter passed with the `window()` is `every` which
defines the duration of time between windows.
@@ -170,14 +170,14 @@ When visualized in the InfluxDB UI, each window table is displayed in a differen

## Aggregate data
-[Aggregate functions](/v2.0/reference/flux/functions/built-in/transformations/aggregates) take the values
+[Aggregate functions](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates) take the values
of all rows in a table and use them to perform an aggregate operation.
The result is output as a new value in a single-row table.
Since windowed data is split into separate tables, aggregate operations run against
each table separately and output new tables containing only the aggregated value.
-For this example, use the [`mean()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean)
+For this example, use the [`mean()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean)
to output the average of each window:
```js
@@ -241,7 +241,7 @@ These represent the lower and upper bounds of the time window.
Many Flux functions rely on the `_time` column.
To further process your data after an aggregate function, you need to re-add `_time`.
-Use the [`duplicate()` function](/v2.0/reference/flux/functions/built-in/transformations/duplicate) to
+Use the [`duplicate()` function](/v2.0/reference/flux/stdlib/built-in/transformations/duplicate) to
duplicate either the `_start` or `_stop` column as a new `_time` column.
```js
@@ -329,7 +329,7 @@ With the aggregate values in a single table, data points in the visualization ar
You have now created a Flux query that windows and aggregates data.
The data transformation process outlined in this guide should be used for all aggregation operations.
-Flux also provides the [`aggregateWindow()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow)
+Flux also provides the [`aggregateWindow()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow)
which performs all these separate functions for you.
The following Flux query will return the same results:
diff --git a/content/v2.0/reference/annotated-csv.md b/content/v2.0/reference/annotated-csv.md
new file mode 100644
index 000000000..9d8a462ab
--- /dev/null
+++ b/content/v2.0/reference/annotated-csv.md
@@ -0,0 +1,220 @@
+---
+title: Annotated CSV syntax
+description: >
+ Annotated CSV format is used to encode HTTP responses and results returned to the Flux `csv.from()` function.
+weight: 6
+menu:
+ v2_0_ref:
+ name: Annotated CSV
+---
+
+Annotated CSV (comma-separated values) format is used to encode HTTP responses and results returned to the Flux [`csv.from()` function](https://v2.docs.influxdata.com/v2.0/reference/flux/stdlib/csv/from/).
+
+CSV tables must be encoded in UTF-8 and Unicode Normal Form C as defined in [UAX15](http://www.unicode.org/reports/tr15/). Line endings must be CRLF (Carriage Return Line Feed) as defined by the `text/csv` MIME type in [RFC 4180](https://tools.ietf.org/html/rfc4180).
+
+## Examples
+
+In this topic, you'll find examples of valid CSV syntax for responses to the following query:
+
+```js
+from(bucket:"mydb/autogen")
+ |> range(start:2018-05-08T20:50:00Z, stop:2018-05-08T20:51:00Z)
+ |> group(columns:["_start","_stop", "region", "host"])
+ |> yield(name:"my-result")
+```
+
+## CSV response format
+
+Flux supports the encodings listed below.
+
+### Tables
+
+A table may have the following rows and columns.
+
+#### Rows
+
+- **Annotation rows**: describe column properties.
+
+- **Header row**: defines column labels (one header row per table).
+
+- **Record row**: describes data in the table (one record per row).
+
+##### Example
+
+Encoding of a table with and without a header row.
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Header row](#)
+[Without header row](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```js
+result,table,_start,_stop,_time,region,host,_value
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```js
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+#### Columns
+
+In addition to the data columns, a table may include the following columns:
+
+- **Annotation column**: Only used in annotation rows. Always the first column. Displays the name of an annotation. Value can be empty or a supported [annotation](#annotations). This column is left empty for the entire length of the table, so each row appears to start with `,`.
+
+- **Result column**: Contains the name of the result specified by the query.
+
+- **Table column**: Contains a unique ID for each table in a result.
+
+### Multiple tables and results
+
+If a file or data stream contains multiple tables or results, the following requirements must be met:
+
+- A table column indicates which table a row belongs to.
+- All rows in a table are contiguous.
+- An empty row delimits a new table boundary in the following cases:
+ - Between tables in the same result that do not share a common table schema.
+ - Between concatenated CSV files.
+- Each new table boundary starts with new annotation and header rows.
+
+##### Example
+
+Encoding of two tables in the same result with the same schema (header row) and different schema.
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Same schema](#)
+[Different schema](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```js
+result,table,_start,_stop,_time,region,host,_value
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,west,A,62.73
+my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,west,B,12.83
+my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,C,51.62
+
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```js
+,result,table,_start,_stop,_time,region,host,_value
+,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+
+,result,table,_start,_stop,_time,location,device,min,max
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,USA,5825,62.73,68.42
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,USA,2175,12.83,56.12
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,USA,6913,51.62,54.25
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+### Dialect options
+
+Flux supports the following dialect options for `text/csv` format.
+
+| Option | Description| Default |
+| :-------- | :--------- | :-------|
+| **header** | If true, the header row is included.| `true`|
+| **delimiter** | Character used to delimit columns. | `,`|
+| **quoteChar** | Character used to quote values containing the delimiter. |`"`|
+| **annotations** | List of annotations to encode (datatype, group, or default). |`empty`|
+| **commentPrefix** | String prefix to identify a comment. Always added to annotations. |`#`|
+
+### Annotations
+
+Annotation rows describe column properties, and start with `#` (or commentPrefix value). The first column in an annotation row always contains the annotation name. Subsequent columns contain annotation values as shown in the table below.
+
+|Annotation name | Values| Description |
+| :-------- | :--------- | :-------|
+| **datatype** | a [valid data type](#valid-data-types) | Describes the type of data. |
+| **group** | boolean flag `true` or `false` | Indicates the column is part of the group key.|
+| **default** | a [valid data type](#valid-data-types) |Value to use for rows with an empty string value.|
+
+{{% note %}}
+To encode a table with its group key, the `datatype`, `group`, and `default` annotations must be included. If a table has no rows, the `default` annotation provides the group key values.
+{{% /note %}}
+
+##### Example
+
+Example encoding of datatype, group, and default annotations.
+
+```js
+import "csv"
+a = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double
+#group,false,false,false,false,false,false,false,false
+#default,,,,,,,,
+,result,table,_start,_stop,_time,region,host,_value
+,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+,my-result,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,west,A,62.73
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,west,B,12.83
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,C,51.62
+"
+csv.from(csv:a) |> yield()
+```
+
+### Valid data types
+
+| Datatype | Flux type | Description |
+| :-------- | :--------- | :-----------------------------------------------------------------------------|
+| boolean | bool | a truth value, one of "true" or "false" |
+| unsignedLong | uint | an unsigned 64-bit integer |
+| long | int | a signed 64-bit integer |
+| double | float | an IEEE-754 64-bit floating-point number |
+| string | string | a UTF-8 encoded string |
+| base64Binary | bytes | a base64 encoded sequence of bytes as defined in RFC 4648 |
+| dateTime | time | an instant in time, may be followed with a colon : and a description of the format |
+| duration | duration | a length of time represented as an unsigned 64-bit integer number of nanoseconds |
+
+## Errors
+
+If an error occurs during execution, the query returns a table with:
+
+- An error column that contains an error message.
+- A reference column with a unique reference code to identify more information about the error.
+- A second row that contains the error message and the reference code.
+
+If an error occurs:
+
+- Before results materialize, the HTTP status code indicates an error. Error details are encoded in the CSV table.
+- After partial results are sent to the client, the error is encoded as the next table and remaining results are discarded. In this case, the HTTP status code remains 200 OK.
+
+##### Example
+
+Encoding for an error with the datatype annotation:
+```js
+#datatype,string,long
+,error,reference
+,Failed to parse query,897
+```
+
+Encoding for an error that occurs after a valid table has been encoded:
+```js
+#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double
+,result,table,_start,_stop,_time,region,host,_value
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,west,A,62.73
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,west,B,12.83
+,my-result,1,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,west,C,51.62
+```
+```js
+#datatype,string,long
+,error,reference
+,query terminated: reached maximum allowed memory limits,576
+```
diff --git a/content/v2.0/reference/api.md b/content/v2.0/reference/api.md
index 62352ca3a..2ec766770 100644
--- a/content/v2.0/reference/api.md
+++ b/content/v2.0/reference/api.md
@@ -4,7 +4,7 @@ description: >
The InfluxDB v2 API provides a programmatic interface for interactions with InfluxDB.
Access the InfluxDB API using the `/api/v2/` endpoint.
menu: v2_0_ref
-weight: 2
+weight: 3
v2.0/tags: [api]
---
@@ -15,15 +15,37 @@ Access the InfluxDB API using the `/api/v2/` endpoint.
InfluxDB uses [authentication tokens](/v2.0/security/tokens/) to authorize API requests.
Include your authentication token as an `Authorization` header in each request.
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[InfluxDB OSS](#)
+[InfluxDB Cloud](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
```sh
-curl --request GET \
- --url http://localhost:9999/api/v2/ \
+curl --request POST \
+ --url http://localhost:9999/api/v2/write?org=my-org&bucket=example-bucket \
--header 'Authorization: Token YOURAUTHTOKEN'
```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```sh
+# Use the hostname of your InfluxDB Cloud UI
+curl --request POST \
+ --url https://us-west-2-1.aws.cloud2.influxdata.com/api/v2/write?org=my-org&bucket=example-bucket \
+ --header 'Authorization: Token YOURAUTHTOKEN'
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
-## View Influx v2 API Documentation
-Full InfluxDB v2 API documentation is built into the `influxd` service.
-To view the API documentation, [start InfluxDB](/v2.0/get-started/#start-influxdb)
-and visit the `/docs` endpoint in a browser.
+## View InfluxDB v2 API Documentation
+InfluxDB v2.0 API documentation
-localhost:9999/docs
+### View InfluxDB API documentation locally
+InfluxDB API documentation is built into the `influxd` service and represents
+the API specific to the current version of InfluxDB.
+To view the API documentation locally, [start InfluxDB](/v2.0/get-started/#start-influxdb)
+and visit the `/docs` endpoint in a browser ([localhost:9999/docs](http://localhost:9999/docs)).
+
+## InfluxDB client libraries
+InfluxDB client libraries are language-specific packages that integrate with the InfluxDB v2 API.
+For information about supported client libraries, see [InfluxDB client libraries](/v2.0/reference/client-libraries/).
diff --git a/content/v2.0/reference/cli/_index.md b/content/v2.0/reference/cli/_index.md
index e4021ac27..54b6fa79e 100644
--- a/content/v2.0/reference/cli/_index.md
+++ b/content/v2.0/reference/cli/_index.md
@@ -8,7 +8,7 @@ v2.0/tags: [cli]
menu:
v2_0_ref:
name: Command line tools
-weight: 3
+weight: 4
---
InfluxDB provides command line tools designed to aid in managing and working
diff --git a/content/v2.0/reference/cli/influx/repl/_index.md b/content/v2.0/reference/cli/influx/repl/_index.md
index 6657d0026..e7fa7dba4 100644
--- a/content/v2.0/reference/cli/influx/repl/_index.md
+++ b/content/v2.0/reference/cli/influx/repl/_index.md
@@ -19,6 +19,12 @@ from which you can run Flux commands.
influx repl [flags]
```
+{{% note %}}
+Use **ctrl + d** to exit the REPL.
+{{% /note %}}
+
+To use the Flux REPL, you must first authenticate with a [token](/v2.0/security/tokens/view-tokens/).
+
## Flags
| Flag | Description | Input type |
|:---- |:----------- |:----------:|
diff --git a/content/v2.0/reference/cli/influxd/inspect/_index.md b/content/v2.0/reference/cli/influxd/inspect/_index.md
index a8e21ecef..0c3dd0e90 100644
--- a/content/v2.0/reference/cli/influxd/inspect/_index.md
+++ b/content/v2.0/reference/cli/influxd/inspect/_index.md
@@ -12,15 +12,24 @@ The `influxd inspect` commands and subcommands inspecting on-disk InfluxDB time
## Usage
```sh
-influxd inspect [command]
+influxd inspect [subcommand]
```
## Subcommands
-| Subcommand | Description |
-|:---------- |:----------- |
-| [report-tsm](/v2.0/reference/cli/influxd/inspect/report-tsm/) | Run TSM report |
+| Subcommand | Description |
+|:---------- |:----------- |
+| [build-tsi](/v2.0/reference/cli/influxd/inspect/build-tsi/) | Rebuild the TSI index and series file. |
+| [dump-tsi](/v2.0/reference/cli/influxd/inspect/dump-tsi/) | Output low level TSI information |
+| [dumpwal](/v2.0/reference/cli/influxd/inspect/dumpwal/) | Output TSM data from WAL files |
+| [export-blocks](/v2.0/reference/cli/influxd/inspect/export-blocks/) | Export block data |
+| [export-index](/v2.0/reference/cli/influxd/inspect/export-index/) | Export TSI index data |
+| [report-tsi](/v2.0/reference/cli/influxd/inspect/report-tsi/) | Report the cardinality of TSI files |
+| [report-tsm](/v2.0/reference/cli/influxd/inspect/report-tsm/) | Run TSM report |
+| [verify-seriesfile](/v2.0/reference/cli/influxd/inspect/verify-seriesfile/) | Verify the integrity of series files |
+| [verify-tsm](/v2.0/reference/cli/influxd/inspect/verify-tsm/) | Check the consistency of TSM files |
+| [verify-wal](/v2.0/reference/cli/influxd/inspect/verify-wal/) | Check for corrupt WAL files |
## Flags
-| Flag | Description |
-|:---- |:----------- |
-| `-h`, `--help` | help for inspect |
+| Flag | Description |
+|:---- |:----------- |
+| `-h`, `--help` | Help for `inspect` |
diff --git a/content/v2.0/reference/cli/influxd/inspect/build-tsi.md b/content/v2.0/reference/cli/influxd/inspect/build-tsi.md
new file mode 100644
index 000000000..7d873b4a6
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/build-tsi.md
@@ -0,0 +1,58 @@
+---
+title: influxd inspect build-tsi
+description: >
+ The `influxd inspect build-tsi` command rebuilds the TSI index and, if necessary,
+ the series file.
+v2.0/tags: [tsi]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect build-tsi` command rebuilds the TSI index and, if necessary,
+the series file.
+
+## Usage
+```sh
+influxd inspect build-tsi [flags]
+```
+
+InfluxDB builds the index by reading all Time-Structured Merge tree (TSM) indexes
+and Write Ahead Log (WAL) entries in the TSM and WAL data directories.
+If the series file directory is missing, it rebuilds the series file.
+If the TSI index directory already exists, the command will fail.
+
+### Adjust performance
+Use the following options to adjust the performance of the indexing process:
+
+##### --max-log-file-size
+`--max-log-file-size` determines how much of an index to store in memory before
+compacting it into memory-mappable index files.
+If you find the memory requirements of your TSI index are too high, consider
+decreasing this setting.
+
+##### --max-cache-size
+`--max-cache-size` defines the maximum cache size.
+The indexing process replays WAL files into a `tsm1.Cache`.
+If the maximum cache size is too low, the indexing process will fail.
+Increase `--max-cache-size` to account for the size of your WAL files.
+
+##### --batch-size
+`--batch-size` defines the size of the batches written into the index.
+Altering the batch size can improve performance but may result in significantly
+higher memory usage.
+
+## Flags
+| Flag | Description | Input Type |
+|:---- |:----------- |:----------:|
+| `--batch-size` | The size of the batches to write to the index. Defaults to `10000`. [See above](#batch-size). | integer |
+| `--concurrency` | Number of workers to dedicate to shard index building. Defaults to `GOMAXPROCS` (8 by default). | integer |
+| `-h`, `--help` | Help for `build-tsi`. | |
+| `--max-cache-size` | Maximum cache size. Defaults to `1073741824`. [See above](#max-cache-size). | uinteger |
+| `--max-log-file-size` | Maximum log file size. Defaults to `1048576`. [See above](#max-log-file-size). | integer |
+| `--sfile-path` | Path to the series file directory. Defaults to `~/.influxdbv2/engine/_series`. | string |
+| `--tsi-path` | Path to the TSI index directory. Defaults to `~/.influxdbv2/engine/index`. | string |
+| `--tsm-path` | Path to the TSM data directory. Defaults to `~/.influxdbv2/engine/data`. | string |
+| `-v`, `--verbose` | Enable verbose output. | |
+| `--wal-path` | Path to the WAL data directory. Defaults to `~/.influxdbv2/engine/wal`. | string |
diff --git a/content/v2.0/reference/cli/influxd/inspect/dump-tsi.md b/content/v2.0/reference/cli/influxd/inspect/dump-tsi.md
new file mode 100644
index 000000000..a15bc1020
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/dump-tsi.md
@@ -0,0 +1,33 @@
+---
+title: influxd inspect dump-tsi
+description: >
+ The `influxd inspect dump-tsi` command outputs low-level information about `tsi1` files.
+v2.0/tags: [tsi, inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect dump-tsi` command outputs low-level information about
+Time Series Index (`tsi1`) files.
+
+## Usage
+```sh
+influxd inspect dump-tsi [flags]
+```
+
+## Flags
+| Flag | Description | Input Type |
+|:---- |:----------- |:----------:|
+| `-h`, `--help` | Help for `dump-tsi`. | |
+| `--index-path` | Path to data engine index directory (defaults to `~/.influxdbv2/engine/index`). | string |
+| `--measurement-filter` | Regular expression measurement filter. | string |
+| `--measurements` | Show raw measurement data. | |
+| `--series` | Show raw series data. | |
+| `--series-path` | Path to series file (defaults to `~/.influxdbv2/engine/_series`). | string |
+| `--tag-key-filter` | Regular expression tag key filter. | string |
+| `--tag-keys` | Show raw tag key data. | |
+| `--tag-value-filter` | Regular expression tag value filter. | string |
+| `--tag-value-series` | Show raw series data for each value. | |
+| `--tag-values` | Show raw tag value data. | |
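+
+For example, to show raw measurement data only for measurements whose names match a
+regular expression (the pattern below is illustrative only):
+
+```sh
+influxd inspect dump-tsi --measurements --measurement-filter "^cpu"
+```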
diff --git a/content/v2.0/reference/cli/influxd/inspect/dumpwal.md b/content/v2.0/reference/cli/influxd/inspect/dumpwal.md
new file mode 100644
index 000000000..854226983
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/dumpwal.md
@@ -0,0 +1,68 @@
+---
+title: influxd inspect dumpwal
+description: >
+ The `influxd inspect dumpwal` command outputs data from WAL files.
+v2.0/tags: [wal, inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect dumpwal` command outputs data from Write Ahead Log (WAL) files.
+Given a list of file path globs (patterns that match `.wal` file paths),
+the command parses and prints out entries in each file.
+
+## Usage
+```sh
+influxd inspect dumpwal [flags]
+```
+
+## Output details
+The `--find-duplicates` flag determines the `influxd inspect dumpwal` output.
+
+**Without `--find-duplicates`**, the command outputs the following for each file
+that matches the specified [globbing patterns](#globbing-patterns):
+
+- The file name
+- For each entry in a file:
+ - The type of the entry (`[write]` or `[delete-bucket-range]`)
+ - The formatted entry contents
+
+**With `--find-duplicates`**, the command outputs the following for each file
+that matches the specified [globbing patterns](#globbing-patterns):
+
+- The file name
+- A list of keys with timestamps in the wrong order
+
+## Arguments
+
+### Globbing patterns
+Globbing patterns provide partial paths used to match file paths and names.
+
+##### Example globbing patterns
+```sh
+# Match any file or folder starting with "foo"
+foo*
+
+# Match any file or folder starting with "foo" and ending with .txt
+foo*.txt
+
+# Match any file or folder ending with "foo"
+*foo
+
+# Match foo/bar/baz but not foo/bar/bin/baz
+foo/*/baz
+
+# Match foo/baz and foo/bar/baz and foo/bar/bin/baz
+foo/**/baz
+
+# Matches cat but not can or c/t
+/c?t
+```
+
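+For example, to dump the entries in every WAL file in the default WAL directory
+(the path below assumes the documented default of `~/.influxdbv2/engine/wal`):
+
+```sh
+influxd inspect dumpwal ~/.influxdbv2/engine/wal/*.wal
+```
+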
+## Flags
+| Flag | Description |
+|:---- |:----------- |
+| `--find-duplicates` | Ignore dumping entries; only report keys in the WAL that are out of order. |
+| `-h`, `--help` | Help for `dumpwal`. |
diff --git a/content/v2.0/reference/cli/influxd/inspect/export-blocks.md b/content/v2.0/reference/cli/influxd/inspect/export-blocks.md
new file mode 100644
index 000000000..18f187d30
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/export-blocks.md
@@ -0,0 +1,24 @@
+---
+title: influxd inspect export-blocks
+description: >
+ The `influxd inspect export-blocks` command exports all blocks in one or more
+ TSM1 files to another format for easier inspection and debugging.
+v2.0/tags: [inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect export-blocks` command exports all blocks in one or more
+TSM1 files to another format for easier inspection and debugging.
+
+## Usage
+```sh
+influxd inspect export-blocks [flags]
+```
+
+## Flags
+| Flag | Description |
+|:---- |:----------- |
+| `-h`, `--help` | Help for `export-blocks`. |
diff --git a/content/v2.0/reference/cli/influxd/inspect/export-index.md b/content/v2.0/reference/cli/influxd/inspect/export-index.md
new file mode 100644
index 000000000..71d0e260a
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/export-index.md
@@ -0,0 +1,26 @@
+---
+title: influxd inspect export-index
+description: >
+ The `influxd inspect export-index` command exports all series in a TSI index to
+ SQL format for inspection and debugging.
+v2.0/tags: [inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect export-index` command exports all series in a TSI index to
+SQL format for inspection and debugging.
+
+## Usage
+```sh
+influxd inspect export-index [flags]
+```
+
+## Flags
+| Flag | Description | Input type |
+|:---- |:----------- |:----------:|
+| `-h`, `--help` | Help for `export-index`. | |
+| `--index-path` | Path to the index directory. Defaults to `~/.influxdbv2/engine/index`. | string |
+| `--series-path` | Path to the series file. Defaults to `~/.influxdbv2/engine/_series`. | string |
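+
+For example, assuming the command writes the generated SQL to standard output, you might
+redirect the output to a file for inspection (paths are the documented defaults):
+
+```sh
+influxd inspect export-index \
+  --index-path ~/.influxdbv2/engine/index \
+  --series-path ~/.influxdbv2/engine/_series > index.sql
+```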
diff --git a/content/v2.0/reference/cli/influxd/inspect/report-tsi.md b/content/v2.0/reference/cli/influxd/inspect/report-tsi.md
new file mode 100644
index 000000000..630a38755
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/report-tsi.md
@@ -0,0 +1,44 @@
+---
+title: influxd inspect report-tsi
+description: >
+ The `influxd inspect report-tsi` command analyzes Time Series Index (TSI) files
+ in a storage directory and reports the cardinality of data stored in the files.
+v2.0/tags: [tsi, cardinality, inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect report-tsi` command analyzes Time Series Index (TSI) files
+in a storage directory and reports the cardinality of data stored in the files
+by organization and bucket.
+
+## Output details
+`influxd inspect report-tsi` outputs the following:
+
+- All organizations and buckets in the index.
+- The series cardinality within each organization and bucket.
+- Time to read the index.
+
+When the `--measurements` flag is included, series cardinality is grouped by:
+
+- organization
+- bucket
+- measurement
+
+## Usage
+```sh
+influxd inspect report-tsi [flags]
+```
+
+## Flags
+| Flag | Description | Input Type |
+|:---- |:----------- |:----------:|
+| `--bucket-id` | Process data for specified bucket ID. _Requires `org-id` flag to be set._ | string |
+| `-h`, `--help` | View help for `report-tsi`. | |
+| `-m`, `--measurements` | Group cardinality by measurements. | |
+| `-o`, `--org-id` | Process data for specified organization ID. | string |
+| `--path` | Specify path to index. Defaults to `~/.influxdbv2/engine/index`. | string |
+| `--series-file` | Specify path to series file. Defaults to `~/.influxdbv2/engine/_series`. | string |
+| `-t`, `--top` | Limit results to the top `n`. | integer |
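+
+For example, to report series cardinality grouped by measurement for a single bucket
+(the organization and bucket IDs below are placeholders):
+
+```sh
+influxd inspect report-tsi \
+  --org-id 000000000000000a \
+  --bucket-id 000000000000000b \
+  --measurements
+```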
diff --git a/content/v2.0/reference/cli/influxd/inspect/report-tsm.md b/content/v2.0/reference/cli/influxd/inspect/report-tsm.md
index 85413c1a2..80e23c00f 100644
--- a/content/v2.0/reference/cli/influxd/inspect/report-tsm.md
+++ b/content/v2.0/reference/cli/influxd/inspect/report-tsm.md
@@ -49,7 +49,7 @@ in the following ways:
| Flag | Description | Input Type |
|:---- |:----------- |:----------:|
| `--bucket-id` | Process only data belonging to bucket ID. _Requires `org-id` flag to be set._ | string |
-| `--data-dir` | Use provided data directory (defaults to ~/.influxdbv2/engine/data). | string |
+| `--data-dir` | Use provided data directory (defaults to `~/.influxdbv2/engine/data`). | string |
| `--detailed` | Emit series cardinality segmented by measurements, tag keys, and fields. _**May take a while**_. | |
| `--exact` | Calculate an exact cardinality count. _**May use significant memory**_. | |
| `-h`, `--help` | Help for `report-tsm`. | |
diff --git a/content/v2.0/reference/cli/influxd/inspect/verify-seriesfile.md b/content/v2.0/reference/cli/influxd/inspect/verify-seriesfile.md
new file mode 100644
index 000000000..28563678f
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/verify-seriesfile.md
@@ -0,0 +1,25 @@
+---
+title: influxd inspect verify-seriesfile
+description: >
+ The `influxd inspect verify-seriesfile` command verifies the integrity of series files.
+v2.0/tags: [inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect verify-seriesfile` command verifies the integrity of series files.
+
+## Usage
+```sh
+influxd inspect verify-seriesfile [flags]
+```
+
+## Flags
+| Flag | Description | Input Type |
+|:---- |:----------- |:----------:|
+| `-c`, `--c` | Number of workers to run concurrently (defaults to 8). | integer |
+| `-h`, `--help` | Help for `verify-seriesfile`. | |
+| `--series-file` | Path to series file (defaults to `~/.influxdbv2/engine/_series`). | string |
+| `-v`, `--verbose` | Enable verbose output. | |
diff --git a/content/v2.0/reference/cli/influxd/inspect/verify-tsm.md b/content/v2.0/reference/cli/influxd/inspect/verify-tsm.md
new file mode 100644
index 000000000..e2c5cac84
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/verify-tsm.md
@@ -0,0 +1,34 @@
+---
+title: influxd inspect verify-tsm
+description: >
+ The `influxd inspect verify-tsm` command analyzes a set of TSM files for inconsistencies
+ between the TSM index and the blocks.
+v2.0/tags: [tsm, inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect verify-tsm` command analyzes a set of TSM files for inconsistencies
+between the TSM index and the blocks. It performs the following checks:
+
+- Ensures CRC-32 checksums match for each block.
+- Ensures the minimum and maximum timestamps in the TSM index match the decoded data.
+
+## Usage
+```sh
+influxd inspect verify-tsm <pathspec>... [flags]
+```
+
+## Arguments
+
+### pathspec
+A list of files or directories in which to search for TSM files.
+
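+For example, to check the TSM files for a single bucket in the default data directory
+(the path is the documented default and the IDs are placeholders):
+
+```sh
+influxd inspect verify-tsm ~/.influxdbv2/engine/data \
+  --org-id 000000000000000a \
+  --bucket-id 000000000000000b
+```
+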
+## Flags
+| Flag | Description | Input Type |
+|:---- |:----------- |:----------:|
+| `--bucket-id` | Limit analysis to a specific bucket ID. _Optional._ | string |
+| `-h`, `--help` | Help for `verify-tsm`. | |
+| `--org-id` | Limit analysis to a specific organization ID. _Optional._ | string |
diff --git a/content/v2.0/reference/cli/influxd/inspect/verify-wal.md b/content/v2.0/reference/cli/influxd/inspect/verify-wal.md
new file mode 100644
index 000000000..815ff481c
--- /dev/null
+++ b/content/v2.0/reference/cli/influxd/inspect/verify-wal.md
@@ -0,0 +1,39 @@
+---
+title: influxd inspect verify-wal
+description: >
+ The `influxd inspect verify-wal` command analyzes the Write-Ahead Log (WAL)
+ to check if there are any corrupt files.
+v2.0/tags: [wal, inspect]
+menu:
+ v2_0_ref:
+ parent: influxd inspect
+weight: 301
+---
+
+The `influxd inspect verify-wal` command analyzes the Write-Ahead Log (WAL)
+to check if there are any corrupt files.
+If it finds corrupt files, the command returns the names of those files.
+It also returns the total number of entries in each scanned WAL file.
+
+## Usage
+```sh
+influxd inspect verify-wal [flags]
+```
+
+## Output details
+`influxd inspect verify-wal` outputs the following for each file:
+
+- The file name.
+- The first position of any identified corruption, or "clean" if no corruption is found.
+
+After verification is complete, the command returns a summary with:
+
+- The number of WAL files scanned.
+- The number of WAL entries scanned.
+- A list of files found to be corrupt.
+
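+For example, to verify WAL files in a non-default location (the path below is a placeholder):
+
+```sh
+influxd inspect verify-wal --data-dir /path/to/engine/wal
+```
+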
+## Flags
+| Flag | Description | Input Type |
+|:---- |:----------- |:----------:|
+| `--data-dir` | The data directory to scan (default `~/.influxdbv2/engine/wal`). | string |
+| `-h`, `--help` | Help for `verify-wal`. | |
diff --git a/content/v2.0/reference/client-libraries.md b/content/v2.0/reference/client-libraries.md
index f35c0c2cb..004dd7cc7 100644
--- a/content/v2.0/reference/client-libraries.md
+++ b/content/v2.0/reference/client-libraries.md
@@ -3,7 +3,7 @@ title: InfluxDB client libraries
description: >
InfluxDB client libraries are language-specific tools that integrate with the InfluxDB v2 API.
View the list of available client libraries.
-weight: 6
+weight: 4
menu:
v2_0_ref:
name: Client libraries
@@ -18,5 +18,8 @@ These client libraries are in active development and may not be feature-complete
This list will continue to grow as more client libraries are released.
{{% /note %}}
-
+- [C#](https://github.com/influxdata/influxdb-client-csharp)
+- [Go](https://github.com/influxdata/influxdb-client-go)
+- [Java](https://github.com/influxdata/influxdb-client-java)
- [JavaScript/Node.js](https://github.com/influxdata/influxdb-client-js)
+- [Python](https://github.com/influxdata/influxdb-client-python)
diff --git a/content/v2.0/reference/config-options.md b/content/v2.0/reference/config-options.md
index 563409522..de047c4e3 100644
--- a/content/v2.0/reference/config-options.md
+++ b/content/v2.0/reference/config-options.md
@@ -6,7 +6,7 @@ description: >
menu:
v2_0_ref:
name: Configuration options
- weight: 2
+ weight: 3
---
To configure InfluxDB, use the following configuration options when starting the
diff --git a/content/v2.0/reference/flux/_index.md b/content/v2.0/reference/flux/_index.md
index 647a0407d..a1b77af63 100644
--- a/content/v2.0/reference/flux/_index.md
+++ b/content/v2.0/reference/flux/_index.md
@@ -8,7 +8,7 @@ menu:
weight: 4
---
-The following articles are meant as a reference for Flux functions and the
-Flux language specification.
+The following articles are meant as a reference for the Flux standard library and
+the Flux language specification.
{{< children >}}
diff --git a/content/v2.0/reference/flux/functions/_index.md b/content/v2.0/reference/flux/functions/_index.md
deleted file mode 100644
index ee315518b..000000000
--- a/content/v2.0/reference/flux/functions/_index.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Flux packages and functions
-description: Flux packages and functions allows you to retrieve, transform, process, and output data easily.
-v2.0/tags: [flux, functions, package]
-menu:
- v2_0_ref:
- name: Flux packages and functions
- parent: Flux query language
-weight: 102
----
-
-Flux's functional syntax allows you to retrieve, transform, process, and output data easily.
-There is a large library of built-in functions and importable packages:
-
-{{< children >}}
diff --git a/content/v2.0/reference/flux/functions/built-in/misc/now.md b/content/v2.0/reference/flux/functions/built-in/misc/now.md
deleted file mode 100644
index d342ad0b0..000000000
--- a/content/v2.0/reference/flux/functions/built-in/misc/now.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: now() function
-description: The `now()` function returns the current time (UTC).
-menu:
- v2_0_ref:
- name: now
- parent: built-in-misc
-weight: 401
----
-
-The `now()` function returns the current time (UTC).
-
-_**Function type:** Date/Time_
-_**Output data type:** Time_
-
-```js
-now()
-```
-
-## Examples
-```js
-data
- |> range(start: -10h, stop: now())
-```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/limit.md b/content/v2.0/reference/flux/functions/built-in/transformations/limit.md
deleted file mode 100644
index 8a216e38f..000000000
--- a/content/v2.0/reference/flux/functions/built-in/transformations/limit.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: limit() function
-description: The `limit()` function limits the number of records in output tables to a fixed number (n).
-aliases:
- - /v2.0/reference/flux/functions/transformations/limit
-menu:
- v2_0_ref:
- name: limit
- parent: built-in-transformations
-weight: 401
----
-
-The `limit()` function limits the number of records in output tables to a fixed number ([`n`](#n)).
-One output table is produced for each input table.
-Each output table contains the first `n` records after the first `offset` records of the input table.
-If the input table has less than `offset + n` records, all records except the first `offset` ones are output.
-
-_**Function type:** Filter_
-_**Output data type:** Object_
-
-```js
-limit(n:10, offset: 0)
-```
-
-## Parameters
-
-### n
-The maximum number of records to output.
-
-_**Data type:** Integer_
-
-### offset
-The number of records to skip per table before limiting to `n`.
-Defaults to `0`.
-
-_**Data type:** Integer_
-
-## Examples
-```js
-from(bucket:"example-bucket")
- |> range(start:-1h)
- |> limit(n:10, offset: 1)
-```
-
-
-
-##### Related InfluxQL functions and statements:
-[LIMIT](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-limit-and-slimit-clauses)
diff --git a/content/v2.0/reference/flux/functions/math/m_inf.md b/content/v2.0/reference/flux/functions/math/m_inf.md
deleted file mode 100644
index da77b79ec..000000000
--- a/content/v2.0/reference/flux/functions/math/m_inf.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: math.m_inf() function
-description: The math.m_inf() function returns positive infinity if `sign >= 0`, negative infinity if `sign < 0`.
-
-
-menu:
- v2_0_ref:
- name: math.m_inf
- parent: Math
-weight: 301
----
-
-The `math.m_inf()` function returns positive infinity if `sign >= 0`, negative infinity if `sign < 0`.
-
-_**Output data type:** Float_
-
-```js
-import "math"
-
-math.m_inf(sign: 1)
-
-// Returns +Inf
-```
-
-## Parameters
-
-### sign
-The sign value used in the operation.
-
-_**Data type:** Integer_
diff --git a/content/v2.0/reference/flux/functions/math/m_max.md b/content/v2.0/reference/flux/functions/math/m_max.md
deleted file mode 100644
index 3dd4c422e..000000000
--- a/content/v2.0/reference/flux/functions/math/m_max.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: math.m_max() function
-description: The math.m_max() function returns the larger of `x` or `y`.
-menu:
- v2_0_ref:
- name: math.m_max
- parent: Math
-weight: 301
----
-
-The `math.m_max()` function returns the larger of `x` or `y`.
-
-_**Output data type:** Float_
-
-```js
-import "math"
-
-math.m_max(x: 1.23, y: 4.56)
-
-// Returns 4.56
-```
-
-## Parameters
-
-### x
-The X value used in the operation.
-
-_**Data type:** Float_
-
-### y
-The Y value used in the operation.
-
-_**Data type:** Float_
-
-## Special cases
-```js
-math.m_max(x:x, y:+Inf) // Returns +Inf
-math.m_max(x: +Inf, y:y) // Returns +Inf
-math.m_max(x:x, y: NaN) // Returns NaN
-math.m_max(x: NaN, y:y) // Returns NaN
-math.m_max(x: +0, y: ±0) // Returns +0
-math.m_max(x: ±0, y: +0) // Returns +0
-math.m_max(x: -0, y: -0) // Returns -0
-```
diff --git a/content/v2.0/reference/flux/functions/math/m_min.md b/content/v2.0/reference/flux/functions/math/m_min.md
deleted file mode 100644
index 404982967..000000000
--- a/content/v2.0/reference/flux/functions/math/m_min.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: math.m_min() function
-description: The math.m_min() function returns the smaller of `x` or `y`.
-menu:
- v2_0_ref:
- name: math.m_min
- parent: Math
-weight: 301
----
-
-The `math.m_min()` function returns the smaller of `x` or `y`.
-
-_**Output data type:** Float_
-
-```js
-import "math"
-
-math.m_min(x: 1.23, y: 4.56)
-
-// Returns 1.23
-```
-
-## Parameters
-
-### x
-The X value used in the operation.
-
-_**Data type:** Float_
-
-### y
-The Y value used in the operation.
-
-_**Data type:** Float_
-
-## Special cases
-```js
-Min(x:x, y: -Inf) // Returns -Inf
-Min(x: -Inf, y:y) // Returns -Inf
-Min(x:x, y: NaN) // Returns NaN
-Min(x: NaN, y:y) // Returns NaN
-Min(x: -0, y: ±0) // Returns -0
-Min(x: ±0, y: -0) // Returns -0
-```
diff --git a/content/v2.0/reference/flux/functions/strings/toupper.md b/content/v2.0/reference/flux/functions/strings/toupper.md
deleted file mode 100644
index 81cb9d12f..000000000
--- a/content/v2.0/reference/flux/functions/strings/toupper.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: strings.toUpper() function
-description: The strings.toUpper() function converts a string to upper case.
-menu:
- v2_0_ref:
- name: strings.toUpper
- parent: Strings
-weight: 301
----
-
-The `strings.toUpper()` function converts a string to upper case.
-
-_**Output data type:** String_
-
-```js
-import "strings"
-
-strings.toUpper(v: "koala")
-
-// returns "KOALA"
-```
-
-## Paramters
-
-### v
-The string value to convert.
-
-_**Data type:** String_
-
-## Examples
-
-###### Convert all values of a column to upper case
-```js
-import "strings"
-
-data
- |> map(fn:(r) => strings.toUpper(v: r.envVars))
-```
diff --git a/content/v2.0/reference/flux/functions/system/time.md b/content/v2.0/reference/flux/functions/system/time.md
deleted file mode 100644
index 59a768a92..000000000
--- a/content/v2.0/reference/flux/functions/system/time.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: system.time() function
-description: The `system.time()` function returns the current system time.
-aliases:
- - /v2.0/reference/flux/functions/misc/systemtime
- - /v2.0/reference/flux/functions/built-in/misc/systemtime
-menu:
- v2_0_ref:
- name: system.time
- parent: System
-weight: 401
----
-
-The `system.time()` function returns the current system time.
-
-_**Function type:** Date/Time_
-_**Output data type:** Timestamp_
-
-```js
-import "system"
-
-system.time()
-```
-
-## Examples
-```js
-import "system"
-
-data
- |> set(key: "processed_at", value: string(v: system.time() ))
-```
diff --git a/content/v2.0/reference/flux/language/assignment-scope.md b/content/v2.0/reference/flux/language/assignment-scope.md
index d9ffa74ef..2b20f7ce8 100644
--- a/content/v2.0/reference/flux/language/assignment-scope.md
+++ b/content/v2.0/reference/flux/language/assignment-scope.md
@@ -8,12 +8,6 @@ menu:
weight: 202
---
-{{% note %}}
-This document is a living document and may not represent the current implementation of Flux.
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
An assignment binds an identifier to a variable, option, or function.
Every identifier in a program must be assigned.
@@ -32,10 +26,6 @@ Note that the package clause is not an assignment.
The package name does not appear in any scope.
Its purpose is to identify the files belonging to the same package and to specify the default package name for import declarations.
-{{% note %}}
-[IMPL#247](https://github.com/influxdata/platform/issues/247) Add package/namespace support.
-{{% /note %}}
-
## Variable assignment
A variable assignment creates a variable bound to an identifier and gives it a type and value.
A variable keeps the same type and value for the remainder of its lifetime.
@@ -48,6 +38,10 @@ VariableAssignment = identifier "=" Expression
##### Examples of variable assignment
+{{% note %}}
+In this code snippet, `n` and `m` are defined in an outer block as integers. Within the anonymous function, `n` and `m` are defined as strings, but only within that scope. So while the function will return `"ab"`, `n` and `m` in the outer scope are unchanged, remaining `n = 1` and `m = 2`.
+{{% /note %}}
+
```js
n = 1
m = 2
@@ -55,7 +49,7 @@ x = 5.4
f = () => {
n = "a"
m = "b"
- return a + b
+ return n + m
}
```
diff --git a/content/v2.0/reference/flux/language/built-ins/_index.md b/content/v2.0/reference/flux/language/built-ins/_index.md
deleted file mode 100644
index fb35c8b71..000000000
--- a/content/v2.0/reference/flux/language/built-ins/_index.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Built-ins
-description: >
- Flux contains many preassigned values.
- These preassigned values are defined in the source files for the various built-in packages.
-menu:
- v2_0_ref:
- name: Built-ins
- parent: Flux specification
-weight: 208
----
-
-Flux contains many preassigned values.
-These preassigned values are defined in the source files for the various built-in packages.
-
-{{< children >}}
diff --git a/content/v2.0/reference/flux/language/built-ins/time-constants.md b/content/v2.0/reference/flux/language/built-ins/time-constants.md
deleted file mode 100644
index 7d0dae6ac..000000000
--- a/content/v2.0/reference/flux/language/built-ins/time-constants.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: Time constants
-description: >
- Flux provides built-in time constants for days of the week and months of the year.
-menu:
- v2_0_ref:
- name: Time constants
- parent: Built-ins
-weight: 301
----
-
-{{% note %}}
-This document is a living document and may not represent the current implementation of Flux.
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
-## Days of the week
-Days of the week are represented as integers in the range `[0-6]`.
-The following builtin values are defined:
-
-```js
-Sunday = 0
-Monday = 1
-Tuesday = 2
-Wednesday = 3
-Thursday = 4
-Friday = 5
-Saturday = 6
-```
-
-{{% note %}}
-[IMPL#153](https://github.com/influxdata/flux/issues/153) Add Days of the Week constants
-{{% /note %}}
-
-## Months of the year
-Months are represented as integers in the range `[1-12]`.
-The following builtin values are defined:
-```js
-January = 1
-February = 2
-March = 3
-April = 4
-May = 5
-June = 6
-July = 7
-August = 8
-September = 9
-October = 10
-November = 11
-December = 12
-```
-
-{{% note %}}
-[IMPL#154](https://github.com/influxdata/flux/issues/154) Add Months of the Year constants
-{{% /note %}}
diff --git a/content/v2.0/reference/flux/language/data-model.md b/content/v2.0/reference/flux/language/data-model.md
index 07b25e8df..43575b3d9 100644
--- a/content/v2.0/reference/flux/language/data-model.md
+++ b/content/v2.0/reference/flux/language/data-model.md
@@ -18,9 +18,11 @@ Flux employs a basic data model built from basic data types.
The data model consists of tables, records, columns and streams.
## Record
+
A **record** is a tuple of named values and is represented using an object type.
## Column
+
A **column** has a label and a data type.
The available data types for a column are:
@@ -36,6 +38,7 @@ The available data types for a column are:
| duration | A nanosecond precision duration of time. |
## Table
+
A **table** is set of records with a common set of columns and a group key.
The group key is a list of columns.
@@ -50,24 +53,60 @@ A tables schema consists of its group key and its columns' labels and types.
{{% /note %}}
## Stream of tables
+
A **stream** represents a potentially unbounded set of tables.
A stream is grouped into individual tables using their respective group keys.
Tables within a stream each have a unique group key value.
{{% note %}}
-[IMPL#463](https://github.com/influxdata/flux/issues/463) Specify the primitive types that make up stream and table types
+[IMPL#463](https://github.com/influxdata/flux/issues/463) Specify the primitive
+types that make up stream and table types
{{% /note %}}
-## Missing values
-A record may be missing a value for a specific column.
-Missing values are represented with a special `null` value.
-The `null` value can be of any data type.
+## Missing values (null)
+
+`null` is a predeclared identifier representing a missing or unknown value.
+`null` is the only value comprising the _null type_.
+Any non-boolean operator that operates on basic types returns _null_ when at least one of its operands is _null_.
+
+Think of _null_ as an unknown value.
+The following table explains how _null_ values behave in expressions:
+
+| Expression | Evaluates To | Because |
+| ---------------- | ------------ | ----------------------------------------------------------------------------------- |
+| `null + 5` | `null` | Adding 5 to an unknown value is still unknown |
+| `null * 5` | `null` | Multiplying an unknown value by 5 is still unknown |
+| `null == 5` | `null` | We don't know if an unknown value is equal to 5 |
+| `null < 5` | `null` | We don't know if an unknown value is less than 5 |
+| `null == null` | `null` | We don't know if something unknown is equal to something else that is also unknown |
+
+Operating on something unknown produces something that is still unknown.
+The only place where this is not the case is in boolean logic.
+Because boolean types are nullable, Flux implements ternary logic as a way of handling boolean operators with _null_ operands.
+By interpreting a _null_ operand as an unknown value, we have the following definitions:
+
+- not _null_ = _null_
+- _null_ or false = _null_
+- _null_ or true = true
+- _null_ or _null_ = _null_
+- _null_ and false = false
+- _null_ and true = _null_
+- _null_ and _null_ = _null_
+
+Because records are represented using object types, attempting to access a column
+whose value is unknown or missing from a record will also return _null_.
{{% note %}}
-[IMPL#723](https://github.com/influxdata/platform/issues/723) Design how nulls behave
+According to the definitions above, it is not possible to check if an expression is _null_ using the `==` and `!=` operators.
+These operators will return _null_ if any of their operands are _null_.
+In order to perform such a check, Flux provides a built-in `exists` operator:
+
+- `exists x` returns false if `x` is _null_
+- `exists x` returns true if `x` is not _null_
{{% /note %}}
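+
+For example, a minimal sketch based on the definitions above (assume `r` is a record
+in which the `status` column is missing or _null_):
+
+```js
+exists r.status       // Returns false
+r.status == "ok"      // Returns null (unknown), not false
+```
+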
## Transformations
+
Transformations define a change to a stream.
Transformations may consume an input stream and always produce a new output stream.
The output stream group keys have a stable output order based on the input stream.
@@ -78,3 +117,25 @@ Transformations that modify group keys or values regroup the tables in the outpu
A transformation produces side effects when constructed from a function that produces side effects.
Transformations are represented using function types.
+
+### Match parameter names
+
+Some transformations (for example, `map` and `filter`) are represented using higher-order functions (functions that accept other functions).
+Each argument passed into a function must match the parameter name defined for the function.
+
+For example, `filter` accepts `fn`, which takes one argument named `r`:
+
+```js
+from(bucket: "db")
+ |> filter(fn: (r) => ...)
+```
+
+If a parameter is renamed from `r` to `v`, the script fails:
+
+```js
+from(bucket: "db")
+ |> filter(fn: (v) => ...)
+ // FAILS!: 'v' != 'r'.
+```
+
+Because Flux does not support positional arguments, parameter names matter. The transformation (in this case, `filter`) must know `r` is the parameter name to successfully invoke the function.
diff --git a/content/v2.0/reference/flux/language/expressions.md b/content/v2.0/reference/flux/language/expressions.md
index ea43d4e7c..1dc60a210 100644
--- a/content/v2.0/reference/flux/language/expressions.md
+++ b/content/v2.0/reference/flux/language/expressions.md
@@ -22,6 +22,7 @@ PrimaryExpression = identifier | Literal | "(" Expression ")" .
```
## Literals
+
Literals construct a value.
```js
@@ -37,13 +38,24 @@ Literal = int_lit
```
### Object literals
+
Object literals construct a value with the object type.
```js
-ObjectLiteral = "{" PropertyList "}" .
-PropertyList = [ Property { "," Property } ] .
-Property = identifier [ ":" Expression ]
- | string_lit ":" Expression .
+ObjectLiteral = "{" ObjectBody "}" .
+ObjectBody = WithProperties | PropertyList .
+WithProperties = identifier "with" PropertyList .
+PropertyList = [ Property { "," Property } ] .
+Property = identifier [ ":" Expression ]
+ | string_lit ":" Expression .
+```
+
+**Examples**
+```js
+{a: 1, b: 2, c: 3}
+{a, b, c}
+{o with x: 5, y: 5}
+{o with a, b}
```
### Array literals
@@ -111,6 +123,23 @@ f(a:1, b:9.6)
float(v:1)
```
+Use short notation in a call expression when the name of every argument matches the name of every parameter.
+
+##### Examples of short notation in call expressions
+
+```js
+add = (a, b) => a + b
+a = 1
+b = 2
+
+add(a: a, b: b) // long notation
+add(a, b)       // short notation equivalent
+
+// Don't mix short and long notation.
+add(a: a, b)
+add(a, b: b)
+```
+
## Pipe expressions
A _pipe expression_ is a call expression with an implicit piped argument.
@@ -142,10 +171,21 @@ IndexExpression = "[" Expression "]" .
## Member expressions
Member expressions access a property of an object.
+They are specified using an expression in one of the following forms:
+
+```js
+obj.k
+// or
+obj["k"]
+```
+
The property being accessed must be either an identifier or a string literal.
In either case the literal value is the name of the property being accessed, the identifier is not evaluated.
It is not possible to access an object's property using an arbitrary expression.
+If `obj` contains an entry with property `k`, both `obj.k` and `obj["k"]` return the value associated with `k`.
+If `obj` does **not** contain an entry with property `k`, both `obj.k` and `obj["k"]` return _null_.
+
```js
MemberExpression = DotExpression | MemberBracketExpression
DotExpression = "." identifer
@@ -167,26 +207,33 @@ ConditionalExpression = "if" Expression "then" Expression "else" Expression .
color = if code == 0 then "green" else if code == 1 then "yellow" else "red"
```
+{{% note %}}
+According to the definition above, if a condition evaluates to a _null_ or unknown value,
+the _else_ branch is evaluated.
+{{% /note %}}
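+
+For example, assuming `r` is a record whose `code` column is _null_ (the names are
+illustrative only):
+
+```js
+status = if r.code == 0 then "ok" else "unknown"
+
+// `r.code == 0` evaluates to null, so the else branch runs and status is "unknown".
+```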
+
## Operators
Operators combine operands into expressions.
The precedence of the operators is given in the table below.
Operators with a lower number have higher precedence.
-| Precedence | Operator | Description |
-|:----------:|:--------: |:--------------------------|
-| 1 | `a()` | Function call |
-| | `a[]` | Member or index access |
-| | `.` | Member access |
-| 2 | `*` `/` |Multiplication and division|
-| 3 | `+` `-` | Addition and subtraction |
-| 4 |`==` `!=` | Comparison operators |
-| | `<` `<=` | |
-| | `>` `>=` | |
-| |`=~` `!~` | |
-| 5 | `not` | Unary logical expression |
-| 6 | `and` | Logical AND |
-| 7 | `or` | Logical OR |
-| 8 | `if` `then` `else` | Conditional |
+| Precedence | Operator | Description |
+|:----------:|:--------: |:-------------------------- |
+| 1 | `a()` | Function call |
+| | `a[]` | Member or index access |
+| | `.` | Member access |
+| 2 | `^` | Exponentiation |
+| 3 | `*` `/` `%` | Multiplication, division, and modulo |
+| 4 | `+` `-` | Addition and subtraction |
+| 5 |`==` `!=` | Comparison operators |
+| | `<` `<=` | |
+| | `>` `>=` | |
+| |`=~` `!~` | |
+| 6 | `not` | Unary logical operator |
+| | `exists` | Null check operator |
+| 7 | `and` | Logical AND |
+| 8 | `or` | Logical OR |
+| 9 | `if` `then` `else` | Conditional |
The operator precedence is encoded directly into the grammar as the following.
@@ -199,7 +246,7 @@ LogicalExpression = UnaryLogicalExpression
LogicalOperator = "and" | "or" .
UnaryLogicalExpression = ComparisonExpression
| UnaryLogicalOperator UnaryLogicalExpression .
-UnaryLogicalOperator = "not" .
+UnaryLogicalOperator = "not" | "exists".
ComparisonExpression = MultiplicativeExpression
| ComparisonExpression ComparisonOperator MultiplicativeExpression .
ComparisonOperator = "==" | "!=" | "<" | "<=" | ">" | ">=" | "=~" | "!~" .
@@ -208,7 +255,7 @@ AdditiveExpression = MultiplicativeExpression
AdditiveOperator = "+" | "-" .
MultiplicativeExpression = PipeExpression
| MultiplicativeExpression MultiplicativeOperator PipeExpression .
-MultiplicativeOperator = "*" | "/" .
+MultiplicativeOperator = "*" | "/" | "%" | "^".
PipeExpression = PostfixExpression
| PipeExpression PipeOperator UnaryExpression .
PipeOperator = "|>" .
@@ -222,4 +269,8 @@ PostfixOperator = MemberExpression
| IndexExpression .
```
+{{% warn %}}
+Dividing by 0 or using the modulo operator (`%`) with a divisor of 0 results in an error.
+{{% /warn %}}
+
_Also see [Flux Operators](/v2.0/reference/flux/language/operators)._
diff --git a/content/v2.0/reference/flux/language/lexical-elements.md b/content/v2.0/reference/flux/language/lexical-elements.md
index ee588967b..c39f9f7b6 100644
--- a/content/v2.0/reference/flux/language/lexical-elements.md
+++ b/content/v2.0/reference/flux/language/lexical-elements.md
@@ -269,8 +269,7 @@ String literals support several escape sequences.
\t U+0009 horizontal tab
\" U+0022 double quote
\\ U+005C backslash
-\{ U+007B open curly bracket
-\} U+007D close curly bracket
+\${ U+0024 U+007B dollar sign and opening curly bracket
```
Additionally, any byte value may be specified via a hex encoding using `\x` as the prefix.
@@ -281,15 +280,9 @@ byte_value = `\` "x" hex_digit hex_digit .
hex_digit = "0" … "9" | "A" … "F" | "a" … "f" .
unicode_value = unicode_char | escaped_char .
escaped_char = `\` ( "n" | "r" | "t" | `\` | `"` ) .
-StringExpression = "{" Expression "}" .
+StringExpression = "${" Expression "}" .
```
-{{% note %}}
-To be added: TODO: With string interpolation `string_lit` is not longer a lexical token as part of a literal, but an entire expression in and of itself.
-
-[IMPL#252](https://github.com/influxdata/platform/issues/252) Parse string literals.
-{{% /note %}}
-
##### Examples of string literals
```js
@@ -301,12 +294,12 @@ To be added: TODO: With string interpolation `string_lit` is not longer a lexica
```
String literals are also interpolated for embedded expressions to be evaluated as strings.
-Embedded expressions are enclosed in curly brackets (`{}`).
+Embedded expressions are enclosed in a dollar sign and curly braces (`${}`).
The expressions are evaluated in the scope containing the string literal.
-The result of an expression is formatted as a string and replaces the string content between the brackets.
+The result of an expression is formatted as a string and replaces the string content between the braces.
All types are formatted as strings according to their literal representation.
A function `printf` exists to allow more precise control over formatting of various types.
-To include the literal curly brackets within a string they must be escaped.
+To include the literal `${` within a string, it must be escaped.
{{% note %}}
[IMPL#248](https://github.com/influxdata/platform/issues/248) Add printf function.
@@ -316,14 +309,13 @@ To include the literal curly brackets within a string they must be escaped.
```js
n = 42
-"the answer is {n}" // the answer is 42
-"the answer is not {n+1}" // the answer is not 43
-"openinng curly bracket \{" // openinng curly bracket {
-"closing curly bracket \}" // closing curly bracket }
+"the answer is ${n}" // the answer is 42
+"the answer is not ${n+1}" // the answer is not 43
+"dollar sign opening curly bracket \${" // dollar sign opening curly bracket ${
```
{{% note %}}
-[IMPL#251](https://github.com/influxdata/platform/issues/251) Add string interpolation support
+[IMPL#1775](https://github.com/influxdata/flux/issues/1775) Interpolate arbitrary expressions in string literals
{{% /note %}}
### Regular expression literals
diff --git a/content/v2.0/reference/flux/language/operators.md b/content/v2.0/reference/flux/language/operators.md
index c448d1f45..0daf8d919 100644
--- a/content/v2.0/reference/flux/language/operators.md
+++ b/content/v2.0/reference/flux/language/operators.md
@@ -34,7 +34,8 @@ perform a calculation that returns a single numerical value.
| `-` | Subtraction | `3 - 2` | `1` |
| `*` | Multiplication | `2 * 3` | `6` |
| `/` | Division | `9 / 3` | `3` |
-| `%` | Modulus | `10 % 5` | `0` |
+| `^` | Exponentiation | `2 ^ 3` | `8` |
+| `%` | Modulo | `10 % 5` | `0` |
{{% note %}}
In the current version of Flux, values used in arithmetic operations must
@@ -129,17 +130,19 @@ Literal constructors define fixed values.
The table below outlines operator precedence.
Operators with a lower number have higher precedence.
-|Precedence | Operator | Description |
-|:--------- |:--------: |:----------- |
-| 1 | `a()` | Function call |
-| | `a[]` | Member or index access |
-| | `.` | Member access |
-| 2 | `*` `/` | Multiplication and division|
-| 3 | `+` `-` | Addition and subtraction |
-| 4 | `==` `!=` | Comparison operators |
-| | `<` `<=` | |
-| | `>` `>=` | |
-| | `=~` `!~` | |
-| 5 | `not` | Unary logical expression |
-| 6 | `and` | Logical AND |
-| 7 | `or` | Logical OR |
+| Precedence | Operator           | Description                           |
+|:----------:|:------------------:|:--------------------------------------|
+| 1          | `a()`              | Function call                         |
+|            | `a[]`              | Member or index access                |
+|            | `.`                | Member access                         |
+| 2          | `^`                | Exponentiation                        |
+| 3          | `*` `/` `%`        | Multiplication, division, and modulo  |
+| 4          | `+` `-`            | Addition and subtraction              |
+| 5          | `==` `!=`          | Comparison operators                  |
+|            | `<` `<=`           |                                       |
+|            | `>` `>=`           |                                       |
+|            | `=~` `!~`          |                                       |
+| 6          | `not`              | Unary logical operator                |
+|            | `exists`           | Null check operator                   |
+| 7          | `and`              | Logical AND                           |
+| 8          | `or`               | Logical OR                            |
+| 9          | `if` `then` `else` | Conditional                           |
diff --git a/content/v2.0/reference/flux/language/packages.md b/content/v2.0/reference/flux/language/packages.md
index ce7d0f459..a04d6173c 100644
--- a/content/v2.0/reference/flux/language/packages.md
+++ b/content/v2.0/reference/flux/language/packages.md
@@ -13,12 +13,6 @@ menu:
weight: 207
---
-{{% note %}}
-This document is a living document and may not represent the current implementation of Flux.
-Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
-**XXX** is an issue number tracking discussion and progress towards implementation.
-{{% /note %}}
-
Flux source is organized into packages.
A package consists of one or more source files.
Each source file is parsed individually and composed into a single package.
@@ -41,10 +35,6 @@ All files in the same package must declare the same package name.
When a file does not declare a package clause, all identifiers in that
file will belong to the special `main` package.
-{{% note %}}
-[IMPL#247](https://github.com/influxdata/platform/issues/247) Add package/namespace support.
-{{% /note %}}
-
### Package main
The `main` package is special for a few reasons:
diff --git a/content/v2.0/reference/flux/language/statements.md b/content/v2.0/reference/flux/language/statements.md
index 5ae6b94c0..e3335228b 100644
--- a/content/v2.0/reference/flux/language/statements.md
+++ b/content/v2.0/reference/flux/language/statements.md
@@ -119,6 +119,7 @@ duration // duration of time
time // time
string // utf-8 encoded string
regexp // regular expression
+bytes // sequence of byte values
type // a type that itself describes a type
```
diff --git a/content/v2.0/reference/flux/language/string-interpolation.md b/content/v2.0/reference/flux/language/string-interpolation.md
new file mode 100644
index 000000000..fcb2ea593
--- /dev/null
+++ b/content/v2.0/reference/flux/language/string-interpolation.md
@@ -0,0 +1,96 @@
+---
+title: String interpolation
+description: >
+ Flux string interpolation evaluates string literals containing one or more placeholders
+ and returns a result with placeholders replaced with their corresponding values.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: String interpolation
+weight: 211
+---
+
+Flux string interpolation evaluates string literals containing one or more placeholders
+and returns a result with placeholders replaced with their corresponding values.
+
+## String interpolation syntax
+To use Flux string interpolation, enclose embedded [expressions](/v2.0/reference/flux/language/expressions/)
+in a dollar sign and curly braces `${}`.
+Flux replaces the content between the braces with the result of the expression and
+returns a string literal.
+
+```js
+name = "John"
+
+"My name is ${name}."
+
+// My name is John.
+```
+
+{{% note %}}
+#### Flux only interpolates string values
+Flux currently interpolates only string values ([IMPL#1775](https://github.com/influxdata/flux/issues/1775)).
+Use the [string() function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string/)
+to convert non-string values to strings.
+
+```js
+count = 12
+
+"I currently have ${string(v: count)} cats."
+```
+{{% /note %}}
+
+## Use dot notation to interpolate object values
+[Objects](/v2.0/reference/flux/language/expressions/#object-literals) consist of key-value pairs.
+Use [dot notation](/v2.0/reference/flux/language/expressions/#member-expressions)
+to interpolate values from an object.
+
+```js
+person = {
+ name: "John",
+ age: 42
+}
+
+"My name is ${person.name} and I'm ${string(v: person.age)} years old."
+
+// My name is John and I'm 42 years old.
+```
+
+Flux returns each record in query results as an object.
+In Flux row functions, each row object is represented by `r`.
+Use dot notation to interpolate specific column values from the `r` object.
+
+##### Use string interpolation to add a human-readable message
+```js
+from(bucket: "example-bucket")
+ |> range(start: -30m)
+ |> map(fn: (r) => ({
+ r with
+ "human-readable": "${r._field} is ${r._value} at ${string(v: r._time)}."
+ }))
+```
+
+## String interpolation versus concatenation
+Flux supports both string interpolation and string concatenation.
+String interpolation is a more concise method for achieving the same result.
+
+```js
+person = {
+ name: "John",
+ age: 42
+}
+
+// String interpolation
+"My name is ${person.name} and I'm ${string(v: person.age)} years old."
+
+// String concatenation
+"My name is " + person.name + " and I'm " + string(v: person.age) + " years old."
+
+// Both return: My name is John and I'm 42 years old.
+```
+
+{{% note %}}
+Check and notification message templates configured in the InfluxDB user interface
+**do not** support string concatenation.
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/language/built-ins/system-built-ins.md b/content/v2.0/reference/flux/language/system-built-ins.md
similarity index 85%
rename from content/v2.0/reference/flux/language/built-ins/system-built-ins.md
rename to content/v2.0/reference/flux/language/system-built-ins.md
index 933e09fb8..de25578e2 100644
--- a/content/v2.0/reference/flux/language/built-ins/system-built-ins.md
+++ b/content/v2.0/reference/flux/language/system-built-ins.md
@@ -3,11 +3,13 @@ title: System built-ins
description: >
When a built-in value is not expressible in Flux, its value may be defined by the hosting environment.
All such values must have a corresponding builtin statement to declare the existence and type of the built-in value.
+aliases:
+ - /v2.0/reference/flux/language/built-ins/system-built-ins/
menu:
v2_0_ref:
name: System built-ins
- parent: Built-ins
-weight: 301
+ parent: Flux specification
+weight: 206
---
When a built-in value is not expressible in Flux, its value may be defined by the hosting environment.
diff --git a/content/v2.0/reference/flux/language/types.md b/content/v2.0/reference/flux/language/types.md
index 4e33dbe62..495e6450a 100644
--- a/content/v2.0/reference/flux/language/types.md
+++ b/content/v2.0/reference/flux/language/types.md
@@ -18,89 +18,127 @@ A _type_ defines the set of values and operations on those values.
Types are never explicitly declared as part of the syntax.
Types are always inferred from the usage of the value.
-{{% note %}}
-[IMPL#249](https://github.com/influxdata/platform/issues/249) Specify type inference rules.
-{{% /note %}}
+## Union types
+A union type defines a set of types.
+In the examples below, a union type is specified as follows:
-## Boolean types
+```js
+T = t1 | t2 | ... | tn
+```
+where `t1`, `t2`, ..., and `tn` are types.
+
+In the example above a value of type `T` is either of type `t1`, type `t2`, ..., or type `tn`.
+
+## Basic types
+All Flux data types are constructed from the following types:
+
+### Null types
+The **null type** represents a missing or unknown value.
+The **null type** name is `null`.
+There is only one value that comprises the _null type_ and that is the _null_ value.
+A type `t` is nullable if it can be expressed as follows:
+
+```js
+t = {s} | null
+```
+
+where `{s}` defines a set of values.
+
+### Boolean types
A _boolean type_ represents a truth value, corresponding to the preassigned variables `true` and `false`.
The boolean type name is `bool`.
+The boolean type is nullable and can be formally specified as follows:
-## Numeric types
+```js
+bool = {true, false} | null
+```
+### Numeric types
A _numeric type_ represents sets of integer or floating-point values.
The following numeric types exist:
```
-uint the set of all unsigned 64-bit integers
-int the set of all signed 64-bit integers
-float the set of all IEEE-754 64-bit floating-point numbers
+uint the set of all unsigned 64-bit integers | null
+int the set of all signed 64-bit integers | null
+float the set of all IEEE-754 64-bit floating-point numbers | null
```
-## Time types
+{{% note %}}
+All numeric types are nullable.
+{{% /note %}}
+### Time types
A _time type_ represents a single point in time with nanosecond precision.
The time type name is `time`.
+The time type is nullable.
-### Timestamp format
+#### Timestamp format
Flux supports [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.6) timestamps:
- `YYYY-MM-DD`
- `YYYY-MM-DDT00:00:00Z`
- `YYYY-MM-DDT00:00:00.000Z`
-## Duration types
-
+### Duration types
A _duration type_ represents a length of time with nanosecond precision.
The duration type name is `duration`.
+The duration type is nullable.
Durations can be added to times to produce a new time.
##### Examples of duration types
-
```js
2018-07-01T00:00:00Z + 1mo // 2018-08-01T00:00:00Z
2018-07-01T00:00:00Z + 2y // 2020-07-01T00:00:00Z
2018-07-01T00:00:00Z + 5h // 2018-07-01T05:00:00Z
```
-## String types
-
+### String types
A _string type_ represents a possibly empty sequence of characters.
Strings are immutable and cannot be modified once created.
The string type name is `string`.
+The string type is nullable.
-The length of a string is its size in bytes, not the number of characters, since a single character may be multiple bytes.
+{{% note %}}
+An empty string is **not** a _null_ value.
+{{% /note %}}
+
+The length of a string is its size in bytes, not the number of characters,
+since a single character may be multiple bytes.
+
+### Bytes types
+A _bytes type_ represents a sequence of byte values.
+The bytes type name is `bytes`.
## Regular expression types
-
A _regular expression type_ represents the set of all patterns for regular expressions.
The regular expression type name is `regexp`.
+The regular expression type is **not** nullable.
-## Array types
+## Composite types
+Composite types are constructed from basic types.
+Composite types are not nullable.
+### Array types
An _array type_ represents a sequence of values of any other type.
All values in the array must be of the same type.
The length of an array is the number of elements in the array.
-## Object types
-
+### Object types
An _object type_ represents a set of unordered key and value pairs.
The key must always be a string.
The value may be any other type, and need not be the same as other values within the object.
-## Function types
-
+### Function types
A _function type_ represents a set of all functions with the same argument and result types.
{{% note %}}
[IMPL#249](https://github.com/influxdata/platform/issues/249) Specify type inference rules.
{{% /note %}}
-## Generator types
-
+### Generator types
A _generator type_ represents a value that produces an unknown number of other values.
The generated values may be of any other type, but must all be the same type.
diff --git a/content/v2.0/reference/flux/stdlib/_index.md b/content/v2.0/reference/flux/stdlib/_index.md
new file mode 100644
index 000000000..a3b1b89e1
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/_index.md
@@ -0,0 +1,18 @@
+---
+title: Flux standard library
+description: >
+ The Flux standard library includes built-in functions and importable packages
+ that retrieve, transform, process, and output data.
+aliases:
+ - /v2.0/reference/flux/functions/
+v2.0/tags: [flux, functions, package]
+menu:
+ v2_0_ref:
+ parent: Flux query language
+weight: 102
+---
+
+The Flux standard library includes built-in functions and importable packages
+that retrieve, transform, process, and output data.
+
+{{< children >}}
diff --git a/content/v2.0/reference/flux/functions/all-functions.md b/content/v2.0/reference/flux/stdlib/all-functions.md
similarity index 73%
rename from content/v2.0/reference/flux/functions/all-functions.md
rename to content/v2.0/reference/flux/stdlib/all-functions.md
index f0a0f821a..19e879712 100644
--- a/content/v2.0/reference/flux/functions/all-functions.md
+++ b/content/v2.0/reference/flux/stdlib/all-functions.md
@@ -1,10 +1,12 @@
---
title: Complete list of Flux functions
description: View the full library of documented Flux functions.
+aliases:
+ - /v2.0/reference/flux/functions/all-functions/
menu:
v2_0_ref:
name: View all functions
- parent: Flux packages and functions
+ parent: Flux standard library
weight: 299
---
diff --git a/content/v2.0/reference/flux/functions/built-in/_index.md b/content/v2.0/reference/flux/stdlib/built-in/_index.md
similarity index 87%
rename from content/v2.0/reference/flux/functions/built-in/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/_index.md
index d46c12140..7d3a305d4 100644
--- a/content/v2.0/reference/flux/functions/built-in/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/_index.md
@@ -4,10 +4,12 @@ list_title: Built-in functions
description: >
Built-in functions provide a foundation for working with data using Flux.
They do not require an import statement and are usable without any extra setup.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/
menu:
v2_0_ref:
name: Built-in
- parent: Flux packages and functions
+ parent: Flux standard library
weight: 201
v2.0/tags: [built-in, functions, package]
---
diff --git a/content/v2.0/reference/flux/functions/built-in/inputs/_index.md b/content/v2.0/reference/flux/stdlib/built-in/inputs/_index.md
similarity index 84%
rename from content/v2.0/reference/flux/functions/built-in/inputs/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/inputs/_index.md
index 064ac6442..ae684634a 100644
--- a/content/v2.0/reference/flux/functions/built-in/inputs/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/inputs/_index.md
@@ -3,7 +3,8 @@ title: Flux built-in input functions
list_title: Built-in input functions
description: Flux's built-in input functions define sources of data or or display information about data sources.
aliases:
- - /v2.0/reference/flux/functions/inputs
+ - /v2.0/reference/flux/functions/inputs
+ - /v2.0/reference/flux/functions/built-in/inputs/
menu:
v2_0_ref:
parent: Built-in
diff --git a/content/v2.0/reference/flux/functions/built-in/inputs/buckets.md b/content/v2.0/reference/flux/stdlib/built-in/inputs/buckets.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/inputs/buckets.md
rename to content/v2.0/reference/flux/stdlib/built-in/inputs/buckets.md
index 79aa46686..c759c932f 100644
--- a/content/v2.0/reference/flux/functions/built-in/inputs/buckets.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/inputs/buckets.md
@@ -3,6 +3,7 @@ title: buckets() function
description: The `buckets()` function returns a list of buckets in the organization.
aliases:
- /v2.0/reference/flux/functions/inputs/buckets
+ - /v2.0/reference/flux/functions/built-in/inputs/buckets/
menu:
v2_0_ref:
name: buckets
diff --git a/content/v2.0/reference/flux/functions/built-in/inputs/from.md b/content/v2.0/reference/flux/stdlib/built-in/inputs/from.md
similarity index 95%
rename from content/v2.0/reference/flux/functions/built-in/inputs/from.md
rename to content/v2.0/reference/flux/stdlib/built-in/inputs/from.md
index f7cf1b583..6c9cbe39a 100644
--- a/content/v2.0/reference/flux/functions/built-in/inputs/from.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/inputs/from.md
@@ -3,6 +3,7 @@ title: from() function
description: The `from()` function retrieves data from an InfluxDB data source.
aliases:
- /v2.0/reference/flux/functions/inputs/from
+ - /v2.0/reference/flux/functions/built-in/inputs/from/
menu:
v2_0_ref:
name: from
diff --git a/content/v2.0/reference/flux/functions/built-in/misc/_index.md b/content/v2.0/reference/flux/stdlib/built-in/misc/_index.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/built-in/misc/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/misc/_index.md
index 0e5217ec5..3bedf0dfc 100644
--- a/content/v2.0/reference/flux/functions/built-in/misc/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/misc/_index.md
@@ -6,6 +6,7 @@ description: >
retrieving, transforming, or outputting data.
aliases:
- /v2.0/reference/flux/functions/misc
+ - /v2.0/reference/flux/functions/built-in/misc/
menu:
v2_0_ref:
parent: Built-in
diff --git a/content/v2.0/reference/flux/functions/built-in/misc/intervals.md b/content/v2.0/reference/flux/stdlib/built-in/misc/intervals.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/misc/intervals.md
rename to content/v2.0/reference/flux/stdlib/built-in/misc/intervals.md
index d36acade3..3ee4a7f6c 100644
--- a/content/v2.0/reference/flux/functions/built-in/misc/intervals.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/misc/intervals.md
@@ -3,11 +3,13 @@ title: intervals() function
description: The `intervals()` function generates a set of time intervals over a range of time.
aliases:
- /v2.0/reference/flux/functions/misc/intervals
+ - /v2.0/reference/flux/functions/built-in/misc/intervals/
menu:
v2_0_ref:
name: intervals
parent: built-in-misc
weight: 401
+draft: true
---
The `intervals()` function generates a set of time intervals over a range of time.
@@ -19,7 +21,7 @@ The set of intervals includes all intervals that intersect with the initial rang
{{% note %}}
The `intervals()` function is designed to be used with the intervals parameter
-of the [`window()` function](/v2.0/reference/flux/functions/built-in/transformations/window).
+of the [`window()` function](/v2.0/reference/flux/stdlib/built-in/transformations/window).
{{% /note %}}
By default the end boundary of an interval will align with the Unix epoch (zero time)
diff --git a/content/v2.0/reference/flux/functions/built-in/misc/linearbins.md b/content/v2.0/reference/flux/stdlib/built-in/misc/linearbins.md
similarity index 87%
rename from content/v2.0/reference/flux/functions/built-in/misc/linearbins.md
rename to content/v2.0/reference/flux/stdlib/built-in/misc/linearbins.md
index 0df48b679..658ec2d5a 100644
--- a/content/v2.0/reference/flux/functions/built-in/misc/linearbins.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/misc/linearbins.md
@@ -3,6 +3,7 @@ title: linearBins() function
description: The `linearBins()` function generates a list of linearly separated floats.
aliases:
- /v2.0/reference/flux/functions/misc/linearbins
+ - /v2.0/reference/flux/functions/built-in/misc/linearbins/
menu:
v2_0_ref:
name: linearBins
@@ -12,7 +13,7 @@ weight: 401
The `linearBins()` function generates a list of linearly separated floats.
It is a helper function meant to generate bin bounds for the
-[`histogram()` function](/v2.0/reference/flux/functions/built-in/transformations/histogram).
+[`histogram()` function](/v2.0/reference/flux/stdlib/built-in/transformations/histogram).
_**Function type:** Miscellaneous_
_**Output data type:** Array of floats_
diff --git a/content/v2.0/reference/flux/functions/built-in/misc/logarithmicbins.md b/content/v2.0/reference/flux/stdlib/built-in/misc/logarithmicbins.md
similarity index 82%
rename from content/v2.0/reference/flux/functions/built-in/misc/logarithmicbins.md
rename to content/v2.0/reference/flux/stdlib/built-in/misc/logarithmicbins.md
index 5d6f7270c..5a6c2d59e 100644
--- a/content/v2.0/reference/flux/functions/built-in/misc/logarithmicbins.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/misc/logarithmicbins.md
@@ -3,6 +3,7 @@ title: logarithmicBins() function
description: The `logarithmicBins()` function generates a list of exponentially separated floats.
aliases:
- /v2.0/reference/flux/functions/misc/logarithmicbins
+ - /v2.0/reference/flux/functions/built-in/misc/logarithmicbins/
menu:
v2_0_ref:
name: logarithmicBins
@@ -12,7 +13,7 @@ weight: 401
The `logarithmicBins()` function generates a list of exponentially separated floats.
It is a helper function meant to generate bin bounds for the
-[`histogram()` function](/v2.0/reference/flux/functions/built-in/transformations/histogram).
+[`histogram()` function](/v2.0/reference/flux/stdlib/built-in/transformations/histogram).
_**Function type:** Miscellaneous_
_**Output data type:** Array of floats_
@@ -46,7 +47,7 @@ _**Data type:** Boolean_
## Examples
```js
-logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinty: true)
+logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinity: true)
// Generated list: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, +Inf]
```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/misc/now.md b/content/v2.0/reference/flux/stdlib/built-in/misc/now.md
new file mode 100644
index 000000000..bcc5048c8
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/misc/now.md
@@ -0,0 +1,40 @@
+---
+title: now() function
+description: The `now()` function returns the current time (UTC).
+aliases:
+ - /v2.0/reference/flux/functions/built-in/misc/now/
+menu:
+ v2_0_ref:
+ name: now
+ parent: built-in-misc
+weight: 401
+related:
+ - /v2.0/reference/flux/stdlib/system/time/
+---
+
+The `now()` function returns the current time (UTC).
+
+_**Function type:** Date/Time_
+_**Output data type:** Time_
+
+```js
+now()
+```
+
+## Examples
+```js
+data
+ |> range(start: -10h, stop: now())
+```
+
+{{% note %}}
+#### now() vs system.time()
+`now()` returns the current UTC time.
+`now()` is cached at runtime, so all instances of `now()` in a Flux script
+return the same value.
+
+[`system.time()`](/v2.0/reference/flux/stdlib/system/time/) returns the current
+system time of the host machine, which typically accounts for the local time zone.
+This time represents the time at which `system.time()` is executed, so each
+instance of `system.time()` in a Flux script returns a unique value.
+{{% /note %}}
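+
+The following sketch (with hypothetical timestamps shown in comments) illustrates the difference:
+
+```js
+import "system"
+
+// Both calls return the same cached UTC value, for example 2019-09-01T00:00:00Z
+queryStart = now()
+queryStop = now()
+
+// Each call returns a new value based on the host machine's system time
+t1 = system.time()
+t2 = system.time()
+```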
diff --git a/content/v2.0/reference/flux/stdlib/built-in/misc/sleep.md b/content/v2.0/reference/flux/stdlib/built-in/misc/sleep.md
new file mode 100644
index 000000000..c143a4fd2
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/misc/sleep.md
@@ -0,0 +1,55 @@
+---
+title: sleep() function
+description: The `sleep()` function delays execution by a specified duration.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/misc/sleep/
+menu:
+ v2_0_ref:
+ name: sleep
+ parent: built-in-misc
+weight: 401
+---
+
+The `sleep()` function delays execution by a specified duration.
+
+_**Function type:** Miscellaneous_
+
+```js
+sleep(
+ v: x,
+ duration: 10s
+)
+```
+
+## Parameters
+
+### v
+Defines input tables.
+`sleep()` accepts piped-forward data and passes it on unmodified after the
+specified [duration](#duration).
+If data is not piped-forward into `sleep()`, set `v` to specify a stream object.
+The examples [below](#examples) illustrate how.
+
+_**Data type:** Object_
+
+### duration
+The length of time to delay execution.
+
+_**Data type:** Duration_
+
+## Examples
+
+### Delay execution in a chained query
+```js
+from(bucket: "example-bucket")
+ |> range(start: -1h)
+ |> sleep(duration: 10s)
+```
+
+### Delay execution using a stream variable
+```js
+x = from(bucket: "example-bucket")
+ |> range(start: -1h)
+
+sleep(v: x, duration: 10s)
+```
diff --git a/content/v2.0/reference/flux/functions/built-in/outputs/_index.md b/content/v2.0/reference/flux/stdlib/built-in/outputs/_index.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/built-in/outputs/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/outputs/_index.md
index 568746148..01432c2a4 100644
--- a/content/v2.0/reference/flux/functions/built-in/outputs/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/outputs/_index.md
@@ -4,6 +4,7 @@ list_title: Built-in output functions
description: Flux's built-in output functions yield results or send data to a specified output destination.
aliases:
- /v2.0/reference/flux/functions/outputs
+ - /v2.0/reference/flux/functions/built-in/outputs/
menu:
v2_0_ref:
parent: Built-in
diff --git a/content/v2.0/reference/flux/functions/built-in/outputs/to.md b/content/v2.0/reference/flux/stdlib/built-in/outputs/to.md
similarity index 84%
rename from content/v2.0/reference/flux/functions/built-in/outputs/to.md
rename to content/v2.0/reference/flux/stdlib/built-in/outputs/to.md
index ed047a70f..63ec618b5 100644
--- a/content/v2.0/reference/flux/functions/built-in/outputs/to.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/outputs/to.md
@@ -3,6 +3,7 @@ title: to() function
description: The `to()` function writes data to an InfluxDB v2.0 bucket.
aliases:
- /v2.0/reference/flux/functions/outputs/to
+ - /v2.0/reference/flux/functions/built-in/outputs/to/
menu:
v2_0_ref:
name: to
@@ -35,7 +36,9 @@ to(
```
{{% note %}}
+
### Output data requirements
+
The `to()` function converts output data into line protocol and writes it to InfluxDB.
Line protocol requires each record to have a timestamp, a measurement, a field, and a value.
All output data must include the following columns:
@@ -47,35 +50,32 @@ All output data must include the following columns:
{{% /note %}}
## Parameters
+
{{% note %}}
-`bucket` OR `bucketID` is **required**.
+You must provide a `bucket` or `bucketID` and an `org` or `orgID`.
{{% /note %}}
### bucket
-The bucket to which data is written. Mutually exclusive with `bucketID`.
+The bucket to write data to.
+`bucket` and `bucketID` are mutually exclusive.
_**Data type:** String_
### bucketID
-The ID of the bucket to which data is written. Mutually exclusive with `bucket`.
+The ID of the bucket to write data to.
+`bucketID` and `bucket` are mutually exclusive.
_**Data type:** String_
### org
The organization name of the specified [`bucket`](#bucket).
-Only required when writing to a remote host.
-Mutually exclusive with `orgID`
+`org` and `orgID` are mutually exclusive.
_**Data type:** String_
-{{% note %}}
-Specify either an `org` or an `orgID`, but not both.
-{{% /note %}}
-
### orgID
The organization ID of the specified [`bucket`](#bucket).
-Only required when writing to a remote host.
-Mutually exclusive with `org`.
+`orgID` and `org` are mutually exclusive.
_**Data type:** String_
@@ -92,28 +92,39 @@ _Required when a `host` is specified._
_**Data type:** String_ -->
### timeColumn
+
The time column of the output.
Default is `"_time"`.
_**Data type:** String_
### tagColumns
+
The tag columns of the output.
-Defaults to all columns with type `string`, excluding all value columns and the `_field` column if present.
+Defaults to all columns with type `string`, excluding all value columns and the
+`_field` column if present.
_**Data type:** Array of strings_
### fieldFn
+
Function that takes a record from the input table and returns an object.
-For each record from the input table, `fieldFn` returns an object that maps output the field key to the output value.
+For each record from the input table, `fieldFn` returns an object that maps the
+output field key to the output value.
Default is `(r) => ({ [r._field]: r._value })`
_**Data type:** Function_
_**Output data type:** Object_
+{{% note %}}
+Make sure `fieldFn` parameter names match each specified parameter.
+To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
## Examples
### Default to() operation
+
Given the following table:
| _time | _start | _stop | _measurement | _field | _value |
@@ -138,6 +149,7 @@ _measurement=a temp=99.9 0007
```
### Custom to() operation
+
The `to()` function's default operation can be overridden. For example, given the following table:
| _time | _start | _stop | tag1 | tag2 | hum | temp |
@@ -164,4 +176,5 @@ _tag1=a hum=55.5,temp=99.9 0007
##### Related InfluxQL functions and statements:
+
[SELECT INTO](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-into-clause)
diff --git a/content/v2.0/reference/flux/functions/built-in/outputs/yield.md b/content/v2.0/reference/flux/stdlib/built-in/outputs/yield.md
similarity index 95%
rename from content/v2.0/reference/flux/functions/built-in/outputs/yield.md
rename to content/v2.0/reference/flux/stdlib/built-in/outputs/yield.md
index 2c5c27ef7..8a736af63 100644
--- a/content/v2.0/reference/flux/functions/built-in/outputs/yield.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/outputs/yield.md
@@ -3,6 +3,7 @@ title: yield() function
description: The `yield()` function indicates the input tables received should be delivered as a result of the query.
aliases:
- /v2.0/reference/flux/functions/outputs/yield
+ - /v2.0/reference/flux/functions/built-in/outputs/yield/
menu:
v2_0_ref:
name: yield
diff --git a/content/v2.0/reference/flux/functions/built-in/tests/_index.md b/content/v2.0/reference/flux/stdlib/built-in/tests/_index.md
similarity index 89%
rename from content/v2.0/reference/flux/functions/built-in/tests/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/tests/_index.md
index 1dd1dcc77..91ad2fcda 100644
--- a/content/v2.0/reference/flux/functions/built-in/tests/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/tests/_index.md
@@ -2,6 +2,8 @@
title: Flux built-in testing functions
list_title: Built-in testing functions
description: Flux's built-in testing functions test various aspects of piped-forward data.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/tests/
menu:
v2_0_ref:
name: Tests
diff --git a/content/v2.0/reference/flux/functions/built-in/tests/contains.md b/content/v2.0/reference/flux/stdlib/built-in/tests/contains.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/built-in/tests/contains.md
rename to content/v2.0/reference/flux/stdlib/built-in/tests/contains.md
index ab2810daf..88376e185 100644
--- a/content/v2.0/reference/flux/functions/built-in/tests/contains.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/tests/contains.md
@@ -1,6 +1,8 @@
---
title: contains() function
description: The `contains()` function tests whether a value is a member of a set.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/tests/contains/
menu:
v2_0_ref:
name: contains
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/_index.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/_index.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/built-in/transformations/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/_index.md
index c75a50f9f..1b5ac3bfa 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/_index.md
@@ -4,6 +4,7 @@ list_title: Built-in transformation functions
description: Flux's built-in transformation functions transform and shape your data in specific ways.
aliases:
- /v2.0/reference/flux/functions/transformations
+ - /v2.0/reference/flux/functions/built-in/transformations/
menu:
v2_0_ref:
parent: Built-in
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/_index.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/_index.md
similarity index 69%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/_index.md
index 047f02769..b14244358 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/_index.md
@@ -4,6 +4,7 @@ list_title: Built-in aggregate functions
description: Flux's built-in aggregate functions take values from an input table and aggregate them in some way.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/
menu:
v2_0_ref:
parent: built-in-transformations
@@ -29,7 +30,7 @@ Any output table will have the following properties:
- It will not have a `_time` column.
### aggregateWindow helper function
-The [`aggregateWindow()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow)
+The [`aggregateWindow()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow)
does most of the work needed when aggregating data.
It windows and aggregates the data, then combines windowed tables into a single output table.
@@ -43,9 +44,9 @@ The following functions are both aggregates and selectors.
Each returns `n` values after performing an aggregate operation.
They are categorized as selector functions in this documentation:
-- [highestAverage](/v2.0/reference/flux/functions/transformations/selectors/highestaverage)
-- [highestCurrent](/v2.0/reference/flux/functions/transformations/selectors/highestcurrent)
-- [highestMax](/v2.0/reference/flux/functions/transformations/selectors/highestmax)
-- [lowestAverage](/v2.0/reference/flux/functions/transformations/selectors/lowestaverage)
-- [lowestCurrent](/v2.0/reference/flux/functions/transformations/selectors/lowestcurrent)
-- [lowestMin](/v2.0/reference/flux/functions/transformations/selectors/lowestmin)
+- [highestAverage](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestaverage)
+- [highestCurrent](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestcurrent)
+- [highestMax](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestmax)
+- [lowestAverage](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestaverage)
+- [lowestCurrent](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestcurrent)
+- [lowestMin](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestmin)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow.md
similarity index 64%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow.md
index ec538dcc3..ce15d9685 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow.md
@@ -3,6 +3,7 @@ title: aggregateWindow() function
description: The `aggregateWindow()` function applies an aggregate function to fixed windows of time.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/aggregatewindow/
menu:
v2_0_ref:
name: aggregateWindow
@@ -10,7 +11,8 @@ menu:
weight: 501
---
-The `aggregateWindow()` function applies an aggregate function to fixed windows of time.
+The `aggregateWindow()` function applies an aggregate or selector function
+(any function with a `column` parameter) to fixed windows of time.
_**Function type:** Aggregate_
@@ -25,45 +27,59 @@ aggregateWindow(
)
```
-As data is windowed into separate tables and aggregated, the `_time` column is dropped from each group key.
+As data is windowed into separate tables and processed, the `_time` column is dropped from each group key.
This function copies the timestamp from a remaining column into the `_time` column.
View the [function definition](#function-definition).
+`aggregateWindow()` restores the original `_start` and `_stop` values of input data
+and, by default, uses `_stop` to set the `_time` value for each aggregated window.
+Each row in the output of `aggregateWindow` represents an aggregated window ending at `_time`.
+
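+For example, to label each aggregated window with its start time instead of its
+stop time, you could set `timeSrc` to `"_start"` (a sketch, assuming an `example-bucket`):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -6h)
+  |> aggregateWindow(every: 1h, fn: mean, timeSrc: "_start")
+```
+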
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### every
+
The duration of windows.
_**Data type:** Duration_
### fn
-The [aggregate function](/v2.0/reference/flux/functions/built-in/transformations/aggregates) used in the operation.
+
+The [aggregate function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates) used in the operation.
_**Data type:** Function_
{{% note %}}
-Only aggregate functions with a `column` parameter (singular) work with `aggregateWindow()`.
+Only aggregate and selector functions with a `column` parameter (singular) work with `aggregateWindow()`.
{{% /note %}}
### column
+
The column on which to operate.
Defaults to `"_value"`.
_**Data type:** String_
### timeSrc
+
The time column from which time is copied for the aggregate record.
Defaults to `"_stop"`.
_**Data type:** String_
### timeDst
+
The "time destination" column to which time is copied for the aggregate record.
Defaults to `"_time"`.
_**Data type:** String_
### createEmpty
+
For windows without data, this will create an empty window and fill
it with a `null` aggregate value.
Defaults to `true`.
@@ -73,6 +89,7 @@ _**Data type:** Boolean_
## Examples
###### Using an aggregate function with default parameters
+
```js
from(bucket: "example-bucket")
|> range(start: 1h)
@@ -84,10 +101,10 @@ from(bucket: "example-bucket")
fn: mean
)
```
-####### Specifying parameters of the aggregate function
-To use `aggregateWindow()` aggregate functions that don't provide defaults for required parameters,
-for the `fn` parameter, define an anonymous function with `columns` and `tables` parameters
-that pipe-forwards tables into the aggregate function with all required parameters defined:
+###### Specify parameters of the aggregate function
+To use functions that don't provide defaults for required parameters with `aggregateWindow()`,
+define an anonymous function with `column` and `tables` parameters that pipe-forwards
+tables into the aggregate or selector function with all required parameters defined:
```js
from(bucket: "example-bucket")
@@ -103,6 +120,7 @@ from(bucket: "example-bucket")
```
## Function definition
+
```js
aggregateWindow = (every, fn, column="_value", timeSrc="_stop", timeDst="_time", tables=<-) =>
tables
@@ -115,5 +133,6 @@ aggregateWindow = (every, fn, column="_value", timeSrc="_stop", timeDst="_time",
##### Related InfluxQL functions and statements:
+
[InfluxQL aggregate functions](https://docs.influxdata.com/influxdb/latest/query_language/functions/#aggregations)
[GROUP BY time()](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-group-by-clause)
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/chandemomentumoscillator.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/chandemomentumoscillator.md
new file mode 100644
index 000000000..e3e03218a
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/chandemomentumoscillator.md
@@ -0,0 +1,112 @@
+---
+title: chandeMomentumOscillator() function
+description: >
+ The `chandeMomentumOscillator()` function applies the technical momentum indicator
+ developed by Tushar Chande.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/chandemomentumoscillator/
+menu:
+ v2_0_ref:
+ name: chandeMomentumOscillator
+ parent: built-in-aggregates
+weight: 501
+related:
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#chande-momentum-oscillator, InfluxQL CHANDE_MOMENTUM_OSCILLATOR()
+---
+
+The `chandeMomentumOscillator()` function applies the technical momentum indicator
+developed by Tushar Chande.
+
+_**Function type:** Aggregate_
+
+```js
+chandeMomentumOscillator(
+ n: 10,
+ columns: ["_value"]
+)
+```
+
+The Chande Momentum Oscillator (CMO) indicator calculates the difference between
+the sum of all recent data points with values greater than the median value of the data set
+and the sum of all recent data points with values lower than the median value of the data set,
+then divides the result by the sum of all data movement over a given time period.
+It then multiplies the result by 100 and returns a value between -100 and +100.
+
+## Parameters
+
+### n
+The period or number of points to use in the calculation.
+
+_**Data type:** Integer_
+
+### columns
+The columns to operate on.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+
+#### Table transformation with a ten point Chande Momentum Oscillator
+
+###### Input table
+| _time | _value |
+|:-----:|:------:|
+| 0001 | 1 |
+| 0002 | 2 |
+| 0003 | 3 |
+| 0004 | 4 |
+| 0005 | 5 |
+| 0006 | 6 |
+| 0007 | 7 |
+| 0008 | 8 |
+| 0009 | 9 |
+| 0010 | 10 |
+| 0011 | 11 |
+| 0012 | 12 |
+| 0013 | 13 |
+| 0014 | 14 |
+| 0015 | 15 |
+| 0016 | 14 |
+| 0017 | 13 |
+| 0018 | 12 |
+| 0019 | 11 |
+| 0020 | 10 |
+| 0021 | 9 |
+| 0022 | 8 |
+| 0023 | 7 |
+| 0024 | 6 |
+| 0025 | 5 |
+| 0026 | 4 |
+| 0027 | 3 |
+| 0028 | 2 |
+| 0029 | 1 |
+
+###### Query
+```js
+// ...
+ |> chandeMomentumOscillator(n: 10)
+```
+
+###### Output table
+| _time | _value |
+|:-----:|:------:|
+| 0011 | 100 |
+| 0012 | 100 |
+| 0013 | 100 |
+| 0014 | 100 |
+| 0015 | 100 |
+| 0016 | 80 |
+| 0017 | 60 |
+| 0018 | 40 |
+| 0019 | 20 |
+| 0020 | 0 |
+| 0021 | -20 |
+| 0022 | -40 |
+| 0023 | -60 |
+| 0024 | -80 |
+| 0025 | -100 |
+| 0026 | -100 |
+| 0027 | -100 |
+| 0028 | -100 |
+| 0029 | -100 |
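+
+For example, the value at `_time` `0016` can be checked by hand using the standard
+CMO formula, `100 * (sumUp - sumDown) / (sumUp + sumDown)`, applied to the ten
+preceding point-to-point changes: nine changes are increases of `1` and one is a
+decrease of `1`, so `100 * (9 - 1) / (9 + 1) = 80`.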
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/count.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/count.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/count.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/count.md
index 3f30cd2f9..6aaccc552 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/count.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/count.md
@@ -3,6 +3,7 @@ title: count() function
description: The `count()` function outputs the number of non-null records in a column.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/count
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/count/
menu:
v2_0_ref:
name: count
@@ -26,7 +27,7 @@ count(column: "_value")
The column on which to operate.
Defaults to `"_value"`.
-_**Data type: String**_
+_**Data type:** String_
## Examples
```js
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/cov.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/cov.md
similarity index 95%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/cov.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/cov.md
index 708c9375c..af138115a 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/cov.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/cov.md
@@ -3,6 +3,7 @@ title: cov() function
description: The `cov()` function computes the covariance between two streams by first joining the streams, then performing the covariance operation.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/cov
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/cov/
menu:
v2_0_ref:
name: cov
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/covariance.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/covariance.md
similarity index 84%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/covariance.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/covariance.md
index 42b6b8034..f4e7ecbbc 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/covariance.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/covariance.md
@@ -3,6 +3,7 @@ title: covariance() function
description: The `covariance()` function computes the covariance between two columns.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/covariance
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/covariance/
menu:
v2_0_ref:
name: covariance
@@ -39,6 +40,7 @@ _**Data type:** String_
## Examples
```js
from(bucket: "example-bucket")
- |> range(start:-5m)
+ |> range(start:-5m)
+ |> map(fn: (r) => ({r with x: r._value, y: r._value * r._value / 2}))
|> covariance(columns: ["x", "y"])
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/derivative.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/derivative.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/derivative.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/derivative.md
index cffc3973b..fbfebd257 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/derivative.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/derivative.md
@@ -3,6 +3,7 @@ title: derivative() function
description: The `derivative()` function computes the rate of change per unit of time between subsequent non-null records.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/derivative
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/derivative/
menu:
v2_0_ref:
name: derivative
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/difference.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/difference.md
similarity index 72%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/difference.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/difference.md
index f864f6626..45179da67 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/difference.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/difference.md
@@ -3,6 +3,7 @@ title: difference() function
description: The `difference()` function computes the difference between subsequent non-null records.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/difference
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/difference/
menu:
v2_0_ref:
name: difference
@@ -11,13 +12,17 @@ weight: 501
---
The `difference()` function computes the difference between subsequent records.
-The user-specified column of numeric type is subtracted while others are kept intact.
+The user-specified columns of numeric type are subtracted while others are kept intact.
_**Function type:** Aggregate_
_**Output data type:** Float_
```js
-difference(nonNegative: false, column: "_value")
+difference(
+ nonNegative: false,
+ columns: ["_value"],
+ keepFirst: false
+)
```
## Parameters
@@ -28,11 +33,18 @@ When set to `true`, if a value is less than the previous value, it is assumed th
_**Data type:** Boolean_
-### column
-The column to use to compute the difference.
-Defaults to `"_value"`.
+### columns
+The columns to use to compute the difference.
+Defaults to `["_value"]`.
-_**Data type:** String_
+_**Data type:** Array of Strings_
+
+### keepFirst
+Indicates the first row should be kept.
+If `true`, the difference will be `null`.
+Defaults to `false`.
+
+_**Data type:** Boolean_
## Subtraction rules for numeric types
- The difference between two non-null values is their algebraic difference;
@@ -90,6 +102,20 @@ from(bucket: "example-bucket")
| 0004 | 6 | tv |
| 0005 | null | tv |
+
+#### With keepFirst set to true
+```js
+|> difference(nonNegative: false, keepFirst: true)
+```
+###### Output table
+| _time | _value | tag |
+|:-----:|:------:|:---:|
+| 0001 | null | tv |
+| 0002 | null | tv |
+| 0003 | -2 | tv |
+| 0004 | 6 | tv |
+| 0005 | null | tv |
+
##### Related InfluxQL functions and statements:
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema.md
new file mode 100644
index 000000000..54799e1e1
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema.md
@@ -0,0 +1,65 @@
+---
+title: doubleEMA() function
+description: >
+ The `doubleEMA()` function calculates the exponential moving average of values
+ grouped into `n` number of points, giving more weight to recent data at double
+ the rate of `exponentialMovingAverage()`.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/doubleema/
+menu:
+ v2_0_ref:
+ name: doubleEMA
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#double-exponential-moving-average, InfluxQL DOUBLE_EXPONENTIAL_MOVING_AVERAGE()
+---
+
+The `doubleEMA()` function calculates the exponential moving average of values in
+the `_value` column grouped into `n` number of points, giving more weight to recent
+data at double the rate of [`exponentialMovingAverage()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/).
+
+_**Function type:** Aggregate_
+
+```js
+doubleEMA(n: 5)
+```
+
+##### Double exponential moving average rules
+- A double exponential moving average is defined as `doubleEMA = 2 * EMA_N - EMA of EMA_N`.
+ - `EMA` is an exponential moving average.
+ - `N = n` is the period used to calculate the EMA.
+- A true double exponential moving average requires at least `2 * n - 1` values.
+ If not enough values exist to calculate the double EMA, it returns a `NaN` value.
+- `doubleEMA()` inherits all [exponential moving average rules](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/#exponential-moving-average-rules).
+
+## Parameters
+
+### n
+The number of points to average.
+
+_**Data type:** Integer_
+
+## Examples
+
+#### Calculate a five point double exponential moving average
+```js
+from(bucket: "example-bucket")
+ |> range(start: -12h)
+ |> doubleEMA(n: 5)
+```
+
+## Function definition
+```js
+doubleEMA = (n, tables=<-) =>
+ tables
+ |> exponentialMovingAverage(n:n)
+ |> duplicate(column:"_value", as:"ema")
+ |> exponentialMovingAverage(n:n)
+ |> map(fn: (r) => ({r with _value: 2.0 * r.ema - r._value}))
+ |> drop(columns: ["ema"])
+```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage.md
new file mode 100644
index 000000000..960768f25
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage.md
@@ -0,0 +1,75 @@
+---
+title: exponentialMovingAverage() function
+description: >
+ The `exponentialMovingAverage()` function calculates the exponential moving average of values
+ in the `_value` column grouped into `n` number of points, giving more weight to recent data.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/exponentialmovingaverage/
+menu:
+ v2_0_ref:
+ name: exponentialMovingAverage
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#exponential-moving-average, InfluxQL EXPONENTIAL_MOVING_AVERAGE()
+---
+
+The `exponentialMovingAverage()` function calculates the exponential moving average of values
+in the `_value` column grouped into `n` number of points, giving more weight to recent data.
+
+_**Function type:** Aggregate_
+
+```js
+exponentialMovingAverage(n: 5)
+```
+
+##### Exponential moving average rules
+- The first value of an exponential moving average over `n` values is the
+ algebraic mean of `n` values.
+- Subsequent values are calculated as `y(t) = x(t) * k + y(t-1) * (1 - k)`, where:
+ - `y(t)` is the exponential moving average at time `t`.
+ - `x(t)` is the value at time `t`.
+ - `k = 2 / (1 + n)`.
+- The average over a period populated by only `null` values is `null`.
+- Exponential moving averages skip `null` values.
+
+## Parameters
+
+### n
+The number of points to average.
+
+_**Data type:** Integer_
+
+## Examples
+
+#### Calculate a five point exponential moving average
+```js
+from(bucket: "example-bucket")
+ |> range(start: -12h)
+ |> exponentialMovingAverage(n: 5)
+```
+
+#### Table transformation with a two point exponential moving average
+
+###### Input table:
+| _time | tag | _value |
+|:-----:|:---:|:------:|
+| 0001 | tv | null |
+| 0002 | tv | 10 |
+| 0003 | tv | 20 |
+
+###### Query:
+```js
+// ...
+ |> exponentialMovingAverage(n: 2)
+```
+
+###### Output table:
+| _time | tag | _value |
+|:-----:|:---:|:------:|
+| 0002 | tv | 10 |
+| 0003 | tv | 16.67 |
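+
+In this example, `n: 2` gives `k = 2 / (1 + 2) ≈ 0.667`.
+Because exponential moving averages skip `null` values, the first period contains
+only `10`, so the first output value is `10`.
+The second output value is `20 * 0.667 + 10 * 0.333 ≈ 16.67`.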
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/histogramquantile.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/histogramquantile.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/histogramquantile.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/histogramquantile.md
index 7b135a9d8..5cda2d839 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/histogramquantile.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/histogramquantile.md
@@ -5,6 +5,7 @@ description: >
that approximates the cumulative distribution of the dataset.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/histogramquantile
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/histogramquantile/
menu:
v2_0_ref:
name: histogramQuantile
@@ -75,7 +76,7 @@ _**Data type:** String_
The assumed minimum value of the dataset.
When the quantile falls below the lowest upper bound, interpolation is performed between `minValue` and the lowest upper bound.
When `minValue` is equal to negative infinity, the lowest upper bound is used.
-Defaults to `0`.
+Defaults to `0.0`.
_**Data type:** Float_
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/holtwinters.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/holtwinters.md
new file mode 100644
index 000000000..7ad44ac11
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/holtwinters.md
@@ -0,0 +1,115 @@
+---
+title: holtWinters() function
+description: >
+ The `holtWinters()` function applies the Holt-Winters forecasting method to input tables.
+aliases:
+ - /v2.0/reference/flux/functions/transformations/aggregates/holtwinters
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/holtwinters/
+menu:
+ v2_0_ref:
+ name: holtWinters
+ parent: built-in-aggregates
+weight: 501
+related:
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters, InfluxQL HOLT_WINTERS()
+---
+
+The `holtWinters()` function applies the Holt-Winters forecasting method to input tables.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+holtWinters(
+ n: 10,
+ seasonality: 4,
+ interval: 30d,
+ withFit: false,
+ timeColumn: "_time",
+ column: "_value",
+)
+```
+
+The Holt-Winters method predicts [`n`](#n) seasonally-adjusted values for the
+specified [`column`](#column) at the specified [`interval`](#interval).
+For example, if `interval` is `6m` and `n` is `3`, results include three predicted
+values six minutes apart.
+
+#### Seasonality
+[`seasonality`](#seasonality) delimits the length of a seasonal pattern according to `interval`.
+If your `interval` is `2m` and `seasonality` is `4`, then the seasonal pattern occurs every
+eight minutes or every four data points.
+If data doesn't have a seasonal pattern, set `seasonality` to `0`.
+
+#### Space values evenly in time
+`holtWinters()` expects values evenly spaced in time.
+To ensure `holtWinters()` values are spaced evenly in time, the following rules apply:
+
+- Data is grouped into time-based "buckets" determined by the `interval`.
+- If a bucket includes many values, the first value is used.
+- If a bucket includes no values, a missing value (`null`) is added for that bucket.
+
+By default, `holtWinters()` uses the first value in each time bucket to run the Holt-Winters calculation.
+To specify other values to use in the calculation, use:
+
+- [`window()`](/v2.0/reference/flux/stdlib/built-in/transformations/window/)
+ with [selectors](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/)
+ or [aggregates](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/)
+- [`aggregateWindow()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/aggregatewindow)
+
+#### Fitted model
+The `holtWinters()` function applies the [Nelder-Mead optimization](https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method)
+to include "fitted" data points in results when [`withFit`](#withfit) is set to `true`.
+
+#### Null timestamps
+`holtWinters()` discards rows with `null` timestamps before running the Holt-Winters calculation.
+
+#### Null values
+`holtWinters()` treats `null` values as missing data points and includes them in the Holt-Winters calculation.
+
+## Parameters
+
+### n
+The number of values to predict.
+
+_**Data type:** Integer_
+
+### seasonality
+The number of points in a season.
+Defaults to `0`.
+
+_**Data type:** Integer_
+
+### interval
+The interval between two data points.
+
+_**Data type:** Duration_
+
+### withFit
+Return [fitted data](#fitted-model) in results.
+Defaults to `false`.
+
+_**Data type:** Boolean_
+
+### timeColumn
+The time column to use.
+Defaults to `"_time"`.
+
+_**Data type:** String_
+
+### column
+The column to operate on.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+
+##### Use aggregateWindow to prepare data for holtWinters
+```js
+from(bucket: "example-bucket")
+ |> range(start: -7y)
+ |> filter(fn: (r) => r._field == "water_level")
+ |> aggregateWindow(every: 379m, fn: first)
+ |> holtWinters(n: 10, seasonality: 4, interval: 379m)
+```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/increase.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/increase.md
similarity index 84%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/increase.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/increase.md
index b948fb865..7528cca0f 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/increase.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/increase.md
@@ -3,6 +3,7 @@ title: increase() function
description: The `increase()` function calculates the total non-negative difference between values in a table.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/increase
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/increase/
menu:
v2_0_ref:
name: increase
@@ -10,7 +11,8 @@ menu:
weight: 501
---
-The `increase()` function calculates the total non-negative difference between values in a table.
+The `increase()` function calculates the total non-negative difference between
+subsequent values.
A main use case is tracking changes in counter values which may wrap over time
when they hit a threshold or are reset.
In the case of a wrap/reset, we can assume that the absolute delta between two
@@ -20,16 +22,16 @@ _**Function type:** Aggregate_
_**Output data type:** Float_
```js
-increase(column: "_values")
+increase(columns: ["_value"])
```
## Parameters
-### column
-The column for which the increase is calculated.
-Defaults to `"_value"`.
+### columns
+The columns to use in the operation.
+Defaults to `["_value"]`.
-_**Data type:** Strings_
+_**Data type:** Array of strings_
## Examples
```js
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/integral.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/integral.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/integral.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/integral.md
index 862b417d8..c3593844f 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/integral.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/integral.md
@@ -3,6 +3,7 @@ title: integral() function
description: The `integral()` function computes the area under the curve per unit of time of subsequent non-null records.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/integral
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/integral/
menu:
v2_0_ref:
name: integral
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmansama.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmansama.md
new file mode 100644
index 000000000..2d76e185d
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmansama.md
@@ -0,0 +1,54 @@
+---
+title: kaufmansAMA() function
+description: >
+ The `kaufmansAMA()` function calculates the Kaufman's Adaptive Moving Average (KAMA)
+ using values in an input table.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/kaufmansama/
+menu:
+ v2_0_ref:
+ name: kaufmansAMA
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmanser/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#kaufmans-adaptive-moving-average, InfluxQL KAUFMANS_ADAPTIVE_MOVING_AVERAGE()
+---
+
+The `kaufmansAMA()` function calculates the Kaufman's Adaptive Moving Average (KAMA)
+using values in an input table.
+
+_**Function type:** Aggregate_
+
+```js
+kaufmansAMA(
+ n: 10,
+ column: "_value"
+)
+```
+
+Kaufman's Adaptive Moving Average is a trend-following indicator designed to account
+for market noise or volatility.
+
+## Parameters
+
+### n
+The period or number of points to use in the calculation.
+
+_**Data type:** Integer_
+
+### column
+The column to operate on.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -7d)
+ |> kaufmansAMA(
+ n: 10,
+ column: "_value"
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmanser.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmanser.md
new file mode 100644
index 000000000..cac7b51c2
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmanser.md
@@ -0,0 +1,44 @@
+---
+title: kaufmansER() function
+description: >
+ The `kaufmansER()` function calculates the Kaufman's Efficiency Ratio (KER) using
+ values in an input table.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/kaufmanser/
+menu:
+ v2_0_ref:
+ name: kaufmansER
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/kaufmansama/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#kaufmans-efficiency-ratio, InfluxQL KAUFMANS_EFFICIENCY_RATIO()
+---
+
+The `kaufmansER()` function calculates the Kaufman's Efficiency Ratio (KER) using
+values in an input table.
+The function operates on the `_value` column.
+
+_**Function type:** Aggregate_
+
+```js
+kaufmansER(n: 10)
+```
+
+Kaufman's Efficiency Ratio indicator divides the absolute value of the
+Chande Momentum Oscillator by 100 to return a value between 0 and 1.
+Higher values represent a more efficient or trending market.
+
+## Parameters
+
+### n
+The period or number of points to use in the calculation.
+
+_**Data type:** Integer_
+
+## Examples
+```js
+from(bucket: "example-bucket")
+ |> range(start: -7d)
+ |> kaufmansER(n: 10)
+```
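+
+Conceptually, `kaufmansER()` takes the absolute value of the Chande Momentum Oscillator
+and divides it by 100. A rough sketch of that relationship (not the stdlib's actual
+definition) could be written as:
+
+```js
+import "math"
+
+// Hypothetical helper illustrating the relationship between KER and CMO
+kerSketch = (n, tables=<-) =>
+  tables
+    |> chandeMomentumOscillator(n: n)
+    |> map(fn: (r) => ({r with _value: math.abs(x: r._value) / 100.0}))
+```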
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean.md
index 5f0cc4366..6065bdadc 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/mean.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mean.md
@@ -3,6 +3,7 @@ title: mean() function
description: The `mean()` function computes the mean or average of non-null records in the input table.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/mean
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/mean/
menu:
v2_0_ref:
name: mean
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/median.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/median.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/median.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/median.md
index eff12a6f7..271ca394f 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/median.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/median.md
@@ -5,6 +5,7 @@ description: >
in the input table with values that fall within the `0.5` quantile or 50th percentile.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/median
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/median/
menu:
v2_0_ref:
name: median
@@ -12,7 +13,7 @@ menu:
weight: 501
---
-The `median()` function is a special application of the [`quantile()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile)
+The `median()` function is a special application of the [`quantile()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/quantile)
that returns the median `_value` of an input table or all non-null records in the input table
with values that fall within the `0.5` quantile (50th percentile) depending on the [method](#method) used.
@@ -36,9 +37,9 @@ value that represents the `0.5` quantile.
{{% note %}}
The `median()` function can only be used with float value types.
-It is a special application of the [`quantile()` function](/v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile)
+It is a special application of the [`quantile()` function](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/quantile)
which uses an approximation implementation that requires floats.
-You can convert your value column to a float column using the [`toFloat()` function](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tofloat).
+You can convert your value column to a float column using the [`toFloat()` function](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tofloat).
{{% /note %}}
## Parameters
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mode.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mode.md
new file mode 100644
index 000000000..8afdee34e
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/mode.md
@@ -0,0 +1,62 @@
+---
+title: mode() function
+description: >
+ The `mode()` function computes the mode or value that occurs most often in a
+ specified column in the input table.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/mode/
+menu:
+ v2_0_ref:
+ name: mode
+ parent: built-in-aggregates
+weight: 501
+---
+
+The `mode()` function computes the mode or value that occurs most often in a
+specified column in the input table.
+
+_**Function type:** Aggregate_
+
+```js
+mode(column: "_value")
+```
+
+If there are multiple modes, it returns all of them in a sorted table.
+Mode only considers non-null values.
+If there is no mode, `mode()` returns `null`.
+
+##### Supported data types
+
+- String
+- Float
+- Integer
+- UInteger
+- Boolean
+- Time
+
+## Parameters
+
+### column
+The column to use to compute the mode.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+
+###### Return the mode of windowed data
+```js
+from(bucket: "example-bucket")
+ |> filter(fn: (r) =>
+ r._measurement == "errors" and
+ r._field == "count_per_minute"
+ )
+ |> range(start:-12h)
+ |> window(every:10m)
+ |> mode()
+```
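+
+###### Table transformation with multiple modes
+To illustrate how multiple modes are returned (hypothetical values):
+
+###### Input table
+| _time | _value |
+|:-----:|:------:|
+| 0001  | 1      |
+| 0002  | 1      |
+| 0003  | 2      |
+| 0004  | 2      |
+| 0005  | 3      |
+
+###### Query
+```js
+// ...
+  |> mode()
+```
+
+###### Output table
+| _value |
+|:------:|
+| 1      |
+| 2      |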
+
+
+
+##### Related InfluxQL functions and statements:
+[MODE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#mode)
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage.md
new file mode 100644
index 000000000..7daeee63c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage.md
@@ -0,0 +1,71 @@
+---
+title: movingAverage() function
+description: >
+ The `movingAverage()` function calculates the mean of values grouped into `n` number of points.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/movingaverage/
+menu:
+ v2_0_ref:
+ name: movingAverage
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#moving-average, InfluxQL MOVING_AVERAGE()
+---
+
+The `movingAverage()` function calculates the mean of values in the `_value` column
+grouped into `n` number of points.
+
+_**Function type:** Aggregate_
+
+```js
+movingAverage(n: 5)
+```
+
+##### Moving average rules
+- The average over a period populated by `n` values is equal to their algebraic mean.
+- The average over a period populated by only `null` values is `null`.
+- Moving averages skip `null` values.
+- If `n` is greater than the number of records in a table, `movingAverage` returns
+ the average of the available values.
+
+## Parameters
+
+### n
+The number of points to average.
+
+_**Data type:** Integer_
+
+## Examples
+
+#### Calculate a five point moving average
+```js
+from(bucket: "example-bucket")
+ |> range(start: -12h)
+ |> movingAverage(n: 5)
+```
+
+#### Table transformation with a two point moving average
+
+###### Input table:
+| _time | tag | _value |
+|:-----:|:---:|:------:|
+| 0001 | tv | null |
+| 0002 | tv | 6 |
+| 0003 | tv | 4 |
+
+###### Query:
+```js
+// ...
+ |> movingAverage(n: 2)
+```
+
+###### Output table:
+| _time | tag | _value |
+|:-----:|:---:|:------:|
+| 0002 | tv | 6 |
+| 0003 | tv | 5 |
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/pearsonr.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/pearsonr.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/pearsonr.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/pearsonr.md
index 8aaa439fe..f5e4bc776 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/pearsonr.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/pearsonr.md
@@ -3,6 +3,7 @@ title: pearsonr() function
description: The `pearsonr()` function computes the Pearson R correlation coefficient between two streams by first joining the streams, then performing the covariance operation normalized to compute R.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/pearsonr
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/pearsonr/
menu:
v2_0_ref:
name: pearsonr
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/quantile.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/quantile.md
index c9c41436e..483455487 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/quantile.md
@@ -4,6 +4,7 @@ description: The `quantile()` function outputs non-null records with values that
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/percentile
- /v2.0/reference/flux/functions/built-in/transformations/aggregates/percentile
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile/
menu:
v2_0_ref:
name: quantile
@@ -66,7 +67,7 @@ A selector method that returns the data point for which at least `q` points are
### compression
Indicates how many centroids to use when compressing the dataset.
A larger number produces a more accurate result at the cost of increased memory requirements.
-Defaults to 1000.
+Defaults to `1000.0`.
_**Data type:** Float_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce.md
similarity index 70%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce.md
index 900f87582..72f453a62 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/reduce.md
@@ -3,11 +3,14 @@ title: reduce() function
description: >
The `reduce()` function aggregates records in each table according to the reducer,
`fn`, providing a way to create custom table aggregations.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/reduce/
menu:
v2_0_ref:
name: reduce
parent: built-in-aggregates
weight: 501
+v2.0/tags: [exists]
---
The `reduce()` function aggregates records in each table according to the reducer,
@@ -30,6 +33,10 @@ However, if two reduced tables write to the same destination group key, the func
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### fn
Function to apply to each record with a reducer object ([`identity`](#identity)).
@@ -74,6 +81,26 @@ identity: {identityKey1: value1, identityKey2: value2}
identity: {sum: 0.0, count: 0.0}
```
+## Important notes
+
+#### Preserve columns
+By default, `reduce()` drops any columns that:
+
+1. Are not part of the input table's group key.
+2. Are not explicitly mapped in the `reduce()` function.
+
+This often results in the `_time` column being dropped.
+To preserve the `_time` column and other columns that do not meet the criteria above,
+use the `with` operator to map values in the `r` object.
+The `with` operator updates a column if it already exists,
+creates a new column if it doesn't exist, and includes all existing columns in
+the output table.
+
+```js
+reduce(fn: (r) => ({ r with newColumn: r._value * 2 }))
+```
+
+
## Examples
##### Compute the sum of the value column
@@ -126,3 +153,19 @@ from(bucket:"example-bucket")
identity: {prod: 1.0}
)
```
+
+##### Calculate the average and preserve existing columns
+```js
+from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+ |> window(every: 5m)
+ |> reduce(fn: (r, accumulator) => ({
+ r with
+ count: accumulator.count + 1,
+ total: accumulator.total + r._value,
+ avg: (accumulator.total + r._value) / float(v: accumulator.count)
+ }),
+ identity: {count: 1, total: 0.0, avg: 0.0}
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/relativestrengthindex.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/relativestrengthindex.md
new file mode 100644
index 000000000..58a8cb856
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/relativestrengthindex.md
@@ -0,0 +1,105 @@
+---
+title: relativeStrengthIndex() function
+description: >
+ The `relativeStrengthIndex()` function measures the relative speed and change of
+ values in an input table.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/relativestrengthindex/
+menu:
+ v2_0_ref:
+ name: relativeStrengthIndex
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#relative-strength-index, InfluxQL RELATIVE_STRENGTH_INDEX()
+---
+
+The `relativeStrengthIndex()` function measures the relative speed and change of
+values in an input table.
+
+_**Function type:** Aggregate_
+
+```js
+relativeStrengthIndex(
+ n: 5,
+ columns: ["_value"]
+)
+```
+
+##### Relative strength index rules
+- The general equation for calculating a relative strength index (RSI) is
+ `RSI = 100 - (100 / (1 + (AVG GAIN / AVG LOSS)))`.
+- For the first value of the RSI, `AVG GAIN` and `AVG LOSS` are averages of the `n` period.
+- For subsequent calculations:
+ - `AVG GAIN` = `((PREVIOUS AVG GAIN) * (n - 1)) / n`
+ - `AVG LOSS` = `((PREVIOUS AVG LOSS) * (n - 1)) / n`
+- `relativeStrengthIndex()` ignores `null` values.
+
+## Parameters
+
+### n
+The number of values to use to calculate the RSI.
+
+_**Data type:** Integer_
+
+### columns
+Columns to operate on. _Defaults to `["_value"]`_.
+
+_**Data type:** Array of Strings_
+
+## Examples
+
+#### Calculate a five point relative strength index
+```js
+from(bucket: "example-bucket"):
+ |> range(start: -12h)
+ |> relativeStrengthIndex(n: 5)
+```
+
+#### Table transformation with a ten point RSI
+
+###### Input table:
+| _time | A | B | tag |
+|:-----:|:----:|:----:|:---:|
+| 0001 | 1 | 1 | tv |
+| 0002 | 2 | 2 | tv |
+| 0003 | 3 | 3 | tv |
+| 0004 | 4 | 4 | tv |
+| 0005 | 5 | 5 | tv |
+| 0006 | 6 | 6 | tv |
+| 0007 | 7 | 7 | tv |
+| 0008 | 8 | 8 | tv |
+| 0009 | 9 | 9 | tv |
+| 0010 | 10 | 10 | tv |
+| 0011 | 11 | 11 | tv |
+| 0012 | 12 | 12 | tv |
+| 0013 | 13 | 13 | tv |
+| 0014 | 14 | 14 | tv |
+| 0015 | 15 | 15 | tv |
+| 0016 | 16 | 16 | tv |
+| 0017 | 17 | null | tv |
+| 0018 | 18 | 17 | tv |
+
+###### Query:
+```js
+// ...
+ |> relativeStrengthIndex(
+ n: 10,
+ columns: ["A", "B"]
+ )
+```
+
+###### Output table:
+| _time | A | B | tag |
+|:-----:|:----:|:----:|:---:|
+| 0011 | 100 | 100 | tv |
+| 0012 | 100 | 100 | tv |
+| 0013 | 100 | 100 | tv |
+| 0014 | 100 | 100 | tv |
+| 0015 | 100 | 100 | tv |
+| 0016 | 90 | 90 | tv |
+| 0017 | 81 | 90 | tv |
+| 0018 | 72.9 | 81 | tv |
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/skew.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/skew.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/skew.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/skew.md
index 529fe62b2..04e223c8c 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/skew.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/skew.md
@@ -3,6 +3,7 @@ title: skew() function
description: The `skew()` function outputs the skew of non-null records as a float.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/skew
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/skew/
menu:
v2_0_ref:
name: skew
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/spread.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/spread.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/spread.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/spread.md
index 38a8abbaa..b0ca55f65 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/spread.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/spread.md
@@ -3,6 +3,7 @@ title: spread() function
description: The `spread()` function outputs the difference between the minimum and maximum values in a specified column.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/spread
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/spread/
menu:
v2_0_ref:
name: spread
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/stddev.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/stddev.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/stddev.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/stddev.md
index 66889cd76..027bb214f 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/stddev.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/stddev.md
@@ -3,6 +3,7 @@ title: stddev() function
description: The `stddev()` function computes the standard deviation of non-null records in a specified column.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/stddev
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/stddev/
menu:
v2_0_ref:
name: stddev
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/sum.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/sum.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/transformations/aggregates/sum.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/sum.md
index e78449591..15b614462 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/aggregates/sum.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/sum.md
@@ -3,6 +3,7 @@ title: sum() function
description: The `sum()` function computes the sum of non-null records in a specified column.
aliases:
- /v2.0/reference/flux/functions/transformations/aggregates/sum
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/sum/
menu:
v2_0_ref:
name: sum
@@ -41,4 +42,4 @@ from(bucket: "example-bucket")
##### Related InfluxQL functions and statements:
-[SUM()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#sum)
+[SUM()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#sum)
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage.md
new file mode 100644
index 000000000..2d1b0be01
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage.md
@@ -0,0 +1,85 @@
+---
+title: timedMovingAverage() function
+description: >
+ The `timedMovingAverage()` function calculates the mean of values in a defined time
+ range at a specified frequency.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/timedmovingaverage/
+menu:
+ v2_0_ref:
+ name: timedMovingAverage
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#moving-average, InfluxQL MOVING_AVERAGE()
+---
+
+The `timedMovingAverage()` function calculates the mean of values in a defined time
+range at a specified frequency.
+
+_**Function type:** Aggregate_
+
+```js
+timedMovingAverage(
+ every: 1d,
+ period: 5d,
+ column="_value"
+)
+```
+
+## Parameters
+
+### every
+The frequency of time windows.
+
+_**Data type:** Duration_
+
+### period
+The length of each averaged time window.
+_A negative duration indicates start and stop boundaries are reversed._
+
+_**Data type:** Duration_
+
+### column
+The column used to compute the moving average.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+
+###### Calculate a five year moving average every year
+```js
+from(bucket: "example-bucket")
+ |> range(start: -7y)
+ |> filter(fn: (r) =>
+ r._measurement == "financial" and
+ r._field == "closing_price"
+ )
+ |> timedMovingAverage(every: 1y, period: 5y)
+```
+
+###### Calculate a seven day moving average every day
+```js
+from(bucket: "example-bucket")
+ |> range(start: -50d)
+ |> filter(fn: (r) =>
+ r._measurement == "financial" and
+ r._field == "closing_price"
+ )
+ |> timedMovingAverage(every: 1d, period: 7d)
+```
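+
+###### Calculate a moving average of a non-default column
+A brief sketch of the documented `column` parameter; the column name `"spread"`
+is only an illustrative example.
+```js
+// ...
+  |> timedMovingAverage(every: 1d, period: 7d, column: "spread")
+```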
+
+## Function definition
+```js
+timedMovingAverage = (every, period, column="_value", tables=<-) =>
+ tables
+ |> window(every: every, period: period)
+ |> mean(column:column)
+ |> duplicate(column: "_stop", as: "_time")
+ |> window(every: inf)
+```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema.md
new file mode 100644
index 000000000..140882c33
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema.md
@@ -0,0 +1,70 @@
+---
+title: tripleEMA() function
+description: >
+ The `tripleEMA()` function calculates the exponential moving average of values
+ grouped into `n` number of points, giving more weight to recent data with less lag
+ than `exponentialMovingAverage()` and `doubleEMA()`.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/tripleema/
+menu:
+ v2_0_ref:
+ name: tripleEMA
+ parent: built-in-aggregates
+weight: 501
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#triple-exponential-moving-average, InfluxQL TRIPLE_EXPONENTIAL_MOVING_AVERAGE()
+---
+
+The `tripleEMA()` function calculates the exponential moving average of values in
+the `_value` column grouped into `n` number of points, giving more weight to recent
+data with less lag than
+[`exponentialMovingAverage()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/)
+and [`doubleEMA()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema/).
+
+_**Function type:** Aggregate_
+
+```js
+tripleEMA(n: 5)
+```
+
+##### Triple exponential moving average rules
+- A triple exponential moving average is defined as `tripleEMA = (3 * EMA_1) - (3 * EMA_2) + EMA_3`.
+ - `EMA_1` is the exponential moving average of the original data.
+ - `EMA_2` is the exponential moving average of `EMA_1`.
+ - `EMA_3` is the exponential moving average of `EMA_2`.
+- A true triple exponential moving average requires at least `3 * n - 2` values.
+ If not enough values exist to calculate the triple EMA, it returns a `NaN` value.
+- `tripleEMA()` inherits all [exponential moving average rules](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/#exponential-moving-average-rules).
+
+## Parameters
+
+### n
+The number of points to average.
+
+_**Data type:** Integer_
+
+## Examples
+
+#### Calculate a five point triple exponential moving average
+```js
+from(bucket: "example-bucket"):
+ |> range(start: -12h)
+ |> tripleEMA(n: 5)
+```
+
+## Function definition
+```js
+tripleEMA = (n, tables=<-) =>
+ tables
+ |> exponentialMovingAverage(n:n)
+ |> duplicate(column:"_value", as:"ema1")
+ |> exponentialMovingAverage(n:n)
+ |> duplicate(column:"_value", as:"ema2")
+ |> exponentialMovingAverage(n:n)
+ |> map(fn: (r) => ({r with _value: 3.0 * r.ema1 - 3.0 * r.ema2 + r._value}))
+ |> drop(columns: ["ema1", "ema2"])
+```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleexponentialderivative.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleexponentialderivative.md
new file mode 100644
index 000000000..9d6aee924
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleexponentialderivative.md
@@ -0,0 +1,69 @@
+---
+title: tripleExponentialDerivative() function
+description: >
+ The `tripleExponentialDerivative()` function calculates a triple exponential
+ derivative (TRIX) of input tables using `n` points.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/aggregates/tripleexponentialderivative/
+menu:
+ v2_0_ref:
+ name: tripleExponentialDerivative
+ parent: built-in-aggregates
+weight: 501
+v2.0/tags: [technical analysis]
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/movingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/doubleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/tripleema/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/timedmovingaverage/
+ - /v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/
+ - https://docs.influxdata.com/influxdb/latest/query_language/functions/#triple-exponential-derivative, InfluxQL TRIPLE_EXPONENTIAL_DERIVATIVE()
+---
+
+The `tripleExponentialDerivative()` function calculates a triple exponential
+derivative ([TRIX](https://en.wikipedia.org/wiki/Trix_(technical_analysis))) of
+input tables using `n` points.
+
+_**Function type:** Aggregate_
+
+```js
+tripleExponentialDerivative(n: 5)
+```
+
+Triple exponential derivative, commonly referred to as “TRIX,” is a momentum indicator and oscillator.
+A triple exponential derivative uses the natural logarithm (log) of input data to
+calculate a triple exponential moving average over the period of time.
+The calculation prevents cycles shorter than the defined period from being considered by the indicator.
+`tripleExponentialDerivative()` uses the time between `n` points to define the period.
+
+Triple exponential derivative oscillates around a zero line.
+A positive momentum **oscillator** value indicates an overbought market;
+a negative value indicates an oversold market.
+A positive momentum **indicator** value indicates increasing momentum;
+a negative value indicates decreasing momentum.
+
+##### Triple exponential derivative rules
+- A triple exponential derivative is defined as:
+  - `TRIX[i] = ((EMA_3[i] / EMA_3[i - 1]) - 1) * 100`, where:
+    - `EMA_3 = EMA(EMA(EMA(data)))`
+- If there are not enough values to calculate a triple exponential derivative,
+ the output `_value` is `NaN`; all other columns are the same as the _last_ record of the input table.
+- The function behaves the same way as the [`exponentialMovingAverage()`](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/exponentialmovingaverage/) function:
+ - The function does not include `null` values in the calculation.
+ - The function acts only on the `_value` column.
+
+## Parameters
+
+### n
+The number of points to use in the calculation.
+
+_**Data type:** Integer_
+
+## Examples
+
+#### Calculate a five point triple exponential derivative
+```js
+from(bucket: "example-bucket"):
+ |> range(start: -12h)
+ |> tripleExponentialDerivative(n: 5)
+```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/columns.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/columns.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/columns.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/columns.md
index e76f5bdc6..2703ce387 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/columns.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/columns.md
@@ -6,6 +6,7 @@ description: >
plus a new column containing the labels of the input table's columns.
aliases:
- /v2.0/reference/flux/functions/transformations/columns
+ - /v2.0/reference/flux/functions/built-in/transformations/columns/
menu:
v2_0_ref:
name: columns
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/cumulativesum.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/cumulativesum.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/built-in/transformations/cumulativesum.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/cumulativesum.md
index c63d5f054..72d4087e6 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/cumulativesum.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/cumulativesum.md
@@ -3,6 +3,7 @@ title: cumulativeSum() function
description: The `cumulativeSum()` function computes a running sum for non-null records in the table.
aliases:
- /v2.0/reference/flux/functions/transformations/cumulativesum
+ - /v2.0/reference/flux/functions/built-in/transformations/cumulativesum/
menu:
v2_0_ref:
name: cumulativeSum
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/drop.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/drop.md
similarity index 85%
rename from content/v2.0/reference/flux/functions/built-in/transformations/drop.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/drop.md
index 62c05cfff..2c2f70133 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/drop.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/drop.md
@@ -3,6 +3,7 @@ title: drop() function
description: The `drop()` function removes specified columns from a table.
aliases:
- /v2.0/reference/flux/functions/transformations/drop
+ - /v2.0/reference/flux/functions/built-in/transformations/drop/
menu:
v2_0_ref:
name: drop
@@ -28,13 +29,19 @@ drop(fn: (column) => column =~ /usage*/)
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### columns
+
Columns to be removed from the table.
Cannot be used with `fn`.
_**Data type:** Array of strings_
### fn
+
A predicate function which takes a column name as a parameter (`column`) and returns
a boolean indicating whether or not the column should be removed from the table.
Cannot be used with `columns`.
@@ -44,6 +51,7 @@ _**Data type:** Function_
## Examples
##### Drop a list of columns
+
```js
from(bucket: "example-bucket")
|> range(start: -5m)
@@ -51,6 +59,7 @@ from(bucket: "example-bucket")
```
##### Drop columns matching a predicate
+
```js
from(bucket: "example-bucket")
|> range(start: -5m)
@@ -60,4 +69,5 @@ from(bucket: "example-bucket")
##### Related InfluxQL functions and statements:
+
[DROP MEASUREMENT](https://docs.influxdata.com/influxdb/latest/query_language/database_management/#delete-measurements-with-drop-measurement)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/duplicate.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/duplicate.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/built-in/transformations/duplicate.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/duplicate.md
index 54a90f9b2..cbc060dc0 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/duplicate.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/duplicate.md
@@ -3,6 +3,7 @@ title: duplicate() function
description: The `duplicate()` function duplicates a specified column in a table.
aliases:
- /v2.0/reference/flux/functions/transformations/duplicate
+ - /v2.0/reference/flux/functions/built-in/transformations/duplicate/
menu:
v2_0_ref:
name: duplicate
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/elapsed.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/elapsed.md
new file mode 100644
index 000000000..b297476c2
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/elapsed.md
@@ -0,0 +1,56 @@
+---
+title: elapsed() function
+description: The `elapsed()` function returns the time between subsequent records.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/elapsed/
+menu:
+ v2_0_ref:
+ name: elapsed
+ parent: built-in-transformations
+weight: 401
+---
+
+The `elapsed()` function returns the time between subsequent records.
+Given an input table, `elapsed()` returns the same table without the first record
+(as elapsed time is not defined) and an additional column containing the elapsed time.
+
+_**Function type:** Transformation_
+
+```js
+elapsed(
+ unit: 1s,
+ timeColumn: "_time",
+ columnName: "elapsed"
+)
+```
+
+_`elapsed()` returns an error if the `timeColumn` is not present in the input table._
+
+## Parameters
+
+### unit
+The unit of time to return.
+_Defaults to `1s`._
+
+_**Data type:** Duration_
+
+### timeColumn
+The column to use to compute the elapsed time.
+_Defaults to `"_time"`._
+
+_**Data type:** String_
+
+### columnName
+The column to store elapsed times.
+_Defaults to `"elapsed"`._
+
+_**Data type:** String_
+
+## Examples
+
+##### Calculate the time between points in seconds
+```js
+from(bucket: "example-bucket")
+ |> range(start: -5m)
+ |> elapsed(unit: 1s)
+```
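+
+##### Calculate the time between points in minutes
+A brief sketch using the documented `unit` and `columnName` parameters;
+the column name `"elapsed_minutes"` is only an illustrative choice.
+```js
+from(bucket: "example-bucket")
+  |> range(start: -1h)
+  |> elapsed(unit: 1m, columnName: "elapsed_minutes")
+```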
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/fill.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/fill.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/fill.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/fill.md
index ff534a98d..9cd01d630 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/fill.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/fill.md
@@ -3,6 +3,7 @@ title: fill() function
description: The `fill()` function replaces all null values in an input stream and replace them with a non-null value.
aliases:
- /v2.0/reference/flux/functions/transformations/fill
+ - /v2.0/reference/flux/functions/built-in/transformations/fill/
menu:
v2_0_ref:
name: fill
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/filter.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/filter.md
similarity index 61%
rename from content/v2.0/reference/flux/functions/built-in/transformations/filter.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/filter.md
index c89db54e9..7dec4df8c 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/filter.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/filter.md
@@ -3,11 +3,13 @@ title: filter() function
description: The `filter()` function filters data based on conditions defined in a predicate function (fn).
aliases:
- /v2.0/reference/flux/functions/transformations/filter
+ - /v2.0/reference/flux/functions/built-in/transformations/filter/
menu:
v2_0_ref:
name: filter
parent: built-in-transformations
weight: 401
+v2.0/tags: [exists]
---
The `filter()` function filters data based on conditions defined in a predicate function ([`fn`](#fn)).
@@ -22,10 +24,16 @@ filter(fn: (r) => r._measurement == "cpu")
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### fn
-A single argument function that evaluates true or false.
+
+A single argument predicate function that evaluates true or false.
Records are passed to the function.
Those that evaluate to true are included in the output tables.
+Records that evaluate to _null_ or false are not included in the output tables.
_**Data type:** Function_
@@ -34,6 +42,8 @@ Objects evaluated in `fn` functions are represented by `r`, short for "record" o
{{% /note %}}
## Examples
+
+##### Filter based on measurement, field, and tag
```js
from(bucket:"example-bucket")
|> range(start:-1h)
@@ -44,7 +54,22 @@ from(bucket:"example-bucket")
)
```
+##### Filter out null values
+```js
+from(bucket:"example-bucket")
+ |> range(start:-1h)
+ |> filter(fn: (r) => exists r._value )
+```
+
+##### Filter values based on thresholds
+```js
+from(bucket:"example-bucket")
+ |> range(start:-1h)
+ |> filter(fn: (r) => r._value > 50.0 and r._value < 65.0 )
+```
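+
+##### Filter out null values and apply a threshold
+A brief sketch combining the `exists` operator and a threshold comparison shown above;
+the bucket name and threshold value are illustrative only.
+```js
+from(bucket:"example-bucket")
+  |> range(start:-1h)
+  |> filter(fn: (r) => exists r._value and r._value > 50.0)
+```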
+
##### Related InfluxQL functions and statements:
+
[SELECT](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-basic-select-statement)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/group.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/group.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/group.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/group.md
index 17412ee6c..f968ed990 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/group.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/group.md
@@ -3,6 +3,7 @@ title: group() function
description: The `group()` function groups records based on their values for specific columns.
aliases:
- /v2.0/reference/flux/functions/transformations/group
+ - /v2.0/reference/flux/functions/built-in/transformations/group/
menu:
v2_0_ref:
name: group
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/histogram.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/histogram.md
similarity index 88%
rename from content/v2.0/reference/flux/functions/built-in/transformations/histogram.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/histogram.md
index 336275a1d..a4e04bb96 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/histogram.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/histogram.md
@@ -3,6 +3,7 @@ title: histogram() function
description: The `histogram()` function approximates the cumulative distribution of a dataset by counting data frequencies for a list of bins.
aliases:
- /v2.0/reference/flux/functions/transformations/histogram
+ - /v2.0/reference/flux/functions/built-in/transformations/histogram/
menu:
v2_0_ref:
name: histogram
@@ -15,7 +16,7 @@ A bin is defined by an upper bound where all data points that are less than or e
The bin counts are cumulative.
Each input table is converted into a single output table representing a single histogram.
-The output table has a the same group key as the input table.
+The output table has the same group key as the input table.
Columns not part of the group key are removed and an upper bound column and a count column are added.
_**Function type:** Transformation_
@@ -56,8 +57,8 @@ _**Data type:** Array of floats_
#### Bin helper functions
The following helper functions can be used to generated bins.
-[linearBins()](/v2.0/reference/flux/functions/built-in/misc/linearbins)
-[logarithmicBins()](/v2.0/reference/flux/functions/built-in/misc/logarithmicbins)
+[linearBins()](/v2.0/reference/flux/stdlib/built-in/misc/linearbins)
+[logarithmicBins()](/v2.0/reference/flux/stdlib/built-in/misc/logarithmicbins)
### normalize
When `true`, will convert the counts into frequency values between 0 and 1.
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/hourselection.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/hourselection.md
new file mode 100644
index 000000000..b56ad99f5
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/hourselection.md
@@ -0,0 +1,56 @@
+---
+title: hourSelection() function
+description: >
+ The `hourSelection()` function retains all rows with time values in a specified hour range.
+ Hours are specified in military time.
+aliases:
+ - /v2.0/reference/flux/functions/transformations/hourselection
+ - /v2.0/reference/flux/functions/built-in/transformations/hourselection/
+menu:
+ v2_0_ref:
+ name: hourSelection
+ parent: built-in-transformations
+weight: 401
+---
+
+The `hourSelection()` function retains all rows with time values in a specified hour range.
+
+_**Function type:** Transformation_
+
+```js
+hourSelection(
+ start: 9,
+ stop: 17,
+ timeColumn: "_time"
+)
+```
+
+## Parameters
+
+### start
+The first hour of the hour range (inclusive).
+Hours range from `[0-23]`.
+
+_**Data type:** Integer_
+
+### stop
+The last hour of the hour range (inclusive).
+Hours range from `[0-23]`.
+
+_**Data type:** Integer_
+
+### timeColumn
+The column that contains the time value.
+Default is `"_time"`.
+
+_**Data type:** String_
+
+## Examples
+
+##### Use only data from 9am to 5pm
+```js
+from(bucket:"example-bucket")
+ |> range(start:-90d)
+ |> filter(fn: (r) => r._measurement == "foot-traffic" )
+ |> hourSelection(start: 9, stop: 17)
+```
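+
+##### Use only data from midnight to 6am
+A minimal sketch using the same parameters with different hour values;
+the bucket name is illustrative only.
+```js
+from(bucket:"example-bucket")
+  |> range(start:-90d)
+  |> hourSelection(start: 0, stop: 6)
+```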
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/join.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/join.md
similarity index 97%
rename from content/v2.0/reference/flux/functions/built-in/transformations/join.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/join.md
index 2a8f2d8dd..677fe845d 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/join.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/join.md
@@ -3,6 +3,7 @@ title: join() function
description: The `join()` function merges two or more input streams whose values are equal on a set of common columns into a single output stream.
aliases:
- /v2.0/reference/flux/functions/transformations/join
+ - /v2.0/reference/flux/functions/built-in/transformations/join/
menu:
v2_0_ref:
name: join
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/keep.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/keep.md
similarity index 78%
rename from content/v2.0/reference/flux/functions/built-in/transformations/keep.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/keep.md
index 8bb409599..6c4f0dada 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/keep.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/keep.md
@@ -3,6 +3,7 @@ title: keep() function
description: The `keep()` function returns a table containing only the specified columns.
aliases:
- /v2.0/reference/flux/functions/transformations/keep
+ - /v2.0/reference/flux/functions/built-in/transformations/keep/
menu:
v2_0_ref:
name: keep
@@ -12,7 +13,7 @@ weight: 401
The `keep()` function returns a table containing only the specified columns, ignoring all others.
Only columns in the group key that are also specified in the `keep()` function will be kept in the resulting group key.
-_It is the inverse of [`drop`](/v2.0/reference/flux/functions/built-in/transformations/drop)._
+_It is the inverse of [`drop`](/v2.0/reference/flux/stdlib/built-in/transformations/drop)._
_**Function type:** Transformation_
_**Output data type:** Object_
@@ -27,13 +28,19 @@ keep(fn: (column) => column =~ /inodes*/)
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### columns
+
Columns that should be included in the resulting table.
Cannot be used with `fn`.
_**Data type:** Array of strings_
### fn
+
A predicate function which takes a column name as a parameter (`column`) and returns
a boolean indicating whether or not the column should be included in the resulting table.
Cannot be used with `columns`.
@@ -43,6 +50,7 @@ _**Data type:** Function_
## Examples
##### Keep a list of columns
+
```js
from(bucket: "example-bucket")
|> range(start: -5m)
@@ -50,6 +58,7 @@ from(bucket: "example-bucket")
```
##### Keep all columns matching a predicate
+
```js
from(bucket: "example-bucket")
|> range(start: -5m)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/keys.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/keys.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/keys.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/keys.md
index e4ecd1b4d..5f82e10b3 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/keys.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/keys.md
@@ -6,6 +6,7 @@ description: >
_value column containing the labels of the input table's group key.
aliases:
- /v2.0/reference/flux/functions/transformations/keys
+ - /v2.0/reference/flux/functions/built-in/transformations/keys/
menu:
v2_0_ref:
name: keys
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/keyvalues.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/keyvalues.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/transformations/keyvalues.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/keyvalues.md
index a33b86e28..3db7044bc 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/keyvalues.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/keyvalues.md
@@ -3,6 +3,7 @@ title: keyValues() function
description: The `keyValues()` function returns a table with the input table's group key plus two columns, _key and _value, that correspond to unique column + value pairs from the input table.
aliases:
- /v2.0/reference/flux/functions/transformations/keyvalues
+ - /v2.0/reference/flux/functions/built-in/transformations/keyvalues/
menu:
v2_0_ref:
name: keyValues
@@ -31,6 +32,7 @@ keyValues(fn: (schema) => schema.columns |> filter(fn: (r) => r.label =~ /usage
{{% /note %}}
### keyColumns
+
A list of columns from which values are extracted.
All columns indicated must be of the same type.
Each input table must have all of the columns listed by the `keyColumns` parameter.
@@ -38,9 +40,14 @@ Each input table must have all of the columns listed by the `keyColumns` paramet
_**Data type:** Array of strings_
### fn
+
Function used to identify a set of columns.
All columns indicated must be of the same type.
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
_**Data type:** Function_
## Additional requirements
@@ -52,6 +59,7 @@ _**Data type:** Function_
## Examples
##### Get key values from explicitly defined columns
+
```js
from(bucket: "example-bucket")
|> range(start: -30m)
@@ -60,6 +68,7 @@ from(bucket: "example-bucket")
```
##### Get key values from columns matching a regular expression
+
```js
from(bucket: "example-bucket")
|> range(start: -30m)
@@ -70,6 +79,7 @@ from(bucket: "example-bucket")
##### Related InfluxQL functions and statements:
+
[SHOW MEASUREMENTS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-measurements)
[SHOW FIELD KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-field-keys)
[SHOW TAG KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-tag-keys)
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/limit.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/limit.md
new file mode 100644
index 000000000..dd7915862
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/limit.md
@@ -0,0 +1,51 @@
+---
+title: limit() function
+description: The `limit()` function limits each output table to the first `n` records.
+aliases:
+ - /v2.0/reference/flux/functions/transformations/limit
+ - /v2.0/reference/flux/functions/built-in/transformations/limit/
+menu:
+ v2_0_ref:
+ name: limit
+ parent: built-in-transformations
+weight: 401
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/tail/
+ - https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-limit-and-slimit-clauses, InfluxQL LIMIT
+---
+
+The `limit()` function limits each output table to the first [`n`](#n) records.
+The function produces one output table for each input table.
+Each output table contains the first `n` records after the [`offset`](#offset).
+If the input table has fewer than `offset + n` records, `limit()` outputs all records after the `offset`.
+
+_**Function type:** Filter_
+
+```js
+limit(
+ n:10,
+ offset: 0
+)
+```
+
+## Parameters
+
+### n
+The maximum number of records to output.
+
+_**Data type:** Integer_
+
+### offset
+The number of records to skip per table before limiting to `n`.
+Defaults to `0`.
+
+_**Data type:** Integer_
+
+## Examples
+
+##### Output the first ten records in each table
+```js
+from(bucket:"example-bucket")
+ |> range(start:-1h)
+ |> limit(n:10)
+```
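+
+##### Skip the first five records and output the next ten
+A short sketch of the documented `offset` parameter; the values are illustrative.
+```js
+from(bucket:"example-bucket")
+  |> range(start:-1h)
+  |> limit(n: 10, offset: 5)
+```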
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/map.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/map.md
similarity index 59%
rename from content/v2.0/reference/flux/functions/built-in/transformations/map.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/map.md
index e29d4c3b5..b5327c361 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/map.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/map.md
@@ -3,11 +3,13 @@ title: map() function
description: The `map()` function applies a function to each record in the input tables.
aliases:
- /v2.0/reference/flux/functions/transformations/map
+ - /v2.0/reference/flux/functions/built-in/transformations/map/
menu:
v2_0_ref:
name: map
parent: built-in-transformations
weight: 401
+v2.0/tags: [exists]
---
The `map()` function applies a function to each record in the input tables.
@@ -21,31 +23,17 @@ _**Function type:** Transformation_
_**Output data type:** Object_
```js
-map(fn: (r) => r._value * r._value), mergeKey: true)
+map(fn: (r) => ({ _value: r._value * r._value }))
```
-{{% note %}}
-#### Dropped columns
-`map()` drops any columns that:
-
-1. Are not part of the input table's group key
-2. Are not explicitly mapped in the `map()` function.
-
-This often results in the `_time` column being dropped.
-To preserve the `_time` column, include it in your column mapping.
-
-```js
-map(fn: (r) => ({
- _time: r._time,
- ...
- })
-)
-```
-{{% /note %}}
-
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### fn
+
A single argument function that to apply to each record.
The return value must be an object.
@@ -55,17 +43,30 @@ _**Data type:** Function_
Objects evaluated in `fn` functions are represented by `r`, short for "record" or "row".
{{% /note %}}
-### mergeKey
-Indicates if the record returned from `fn` should be merged with the group key.
-When merging, all columns on the group key will be added to the record giving precedence to any columns already present on the record.
-When not merging, only columns defined on the returned record will be present on the output records.
-Defaults to `true`.
+## Important notes
-_**Data type:** Boolean_
+#### Preserve columns
+
+By default, `map()` drops any columns that:
+
+1. Are not part of the input table's group key.
+2. Are not explicitly mapped in the `map()` function.
+
+This often results in the `_time` column being dropped.
+To preserve the `_time` column and other columns that do not meet the criteria above,
+use the `with` operator to map values in the `r` object.
+The `with` operator updates a column if it already exists,
+creates a new column if it doesn't exist, and includes all existing columns in
+the output table.
+
+```js
+map(fn: (r) => ({ r with newColumn: r._value * 2 }))
+```
## Examples
###### Square the value of each record
+
```js
from(bucket:"example-bucket")
|> filter(fn: (r) =>
@@ -74,10 +75,11 @@ from(bucket:"example-bucket")
r.cpu == "cpu-total"
)
|> range(start:-12h)
- |> map(fn: (r) => r._value * r._value)
+ |> map(fn: (r) => ({ r with _value: r._value * r._value}))
```
###### Create a new table with new format
+
```js
from(bucket:"example-bucket")
|> filter(fn: (r) =>
@@ -87,7 +89,23 @@ from(bucket:"example-bucket")
|> range(start:-12h)
// create a new table by copying each row into a new format
|> map(fn: (r) => ({
- _time: r._time,
+ time: r._time,
app_server: r.host
}))
```
+
+###### Add new columns and preserve existing columns
+```js
+from(bucket:"example-bucket")
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> range(start:-12h)
+ // create a new table by copying each row into a new format
+ |> map(fn: (r) => ({
+ r with
+ app_server: r.host,
+ valueInt: int(v: r._value)
+ }))
+```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/pivot.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/pivot.md
similarity index 99%
rename from content/v2.0/reference/flux/functions/built-in/transformations/pivot.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/pivot.md
index 1066d1e11..db5538ce3 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/pivot.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/pivot.md
@@ -3,6 +3,7 @@ title: pivot() function
description: The `pivot()` function collects values stored vertically (column-wise) in a table and aligns them horizontally (row-wise) into logical sets.
aliases:
- /v2.0/reference/flux/functions/transformations/pivot
+ - /v2.0/reference/flux/functions/built-in/transformations/pivot/
menu:
v2_0_ref:
name: pivot
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/range.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/range.md
similarity index 67%
rename from content/v2.0/reference/flux/functions/built-in/transformations/range.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/range.md
index 5b6308d38..379d6e8ac 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/range.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/range.md
@@ -3,6 +3,7 @@ title: range() function
description: The `range()` function filters records based on time bounds.
aliases:
- /v2.0/reference/flux/functions/transformations/range
+ - /v2.0/reference/flux/functions/built-in/transformations/range/
menu:
v2_0_ref:
name: range
@@ -26,26 +27,24 @@ range(start: -15m, stop: now())
## Parameters
### start
-Specifies the oldest time to be included in the results.
+The earliest time to include in results.
+Use a relative duration or absolute time.
+For example, `-1h` or `2019-08-28T22:00:00Z`.
+Durations are relative to `now()`.
-Relative start times are defined using negative durations.
-Negative durations are relative to now.
-Absolute start times are defined using timestamps.
-
-_**Data type:** Duration or Timestamp_
+_**Data type:** Duration | Time_
### stop
-Specifies the exclusive newest time to be included in the results. Defaults to `now`.
+The latest time to include in results.
+Use a relative duration or absolute time.
+For example, `-1h` or `2019-08-28T22:00:00Z`.
+Durations are relative to `now()`.
+Defaults to `now()`.
-Relative stop times are defined using negative durations.
-Negative durations are relative to now.
-Absolute stop times are defined using timestamps.
-
-_**Data type:** Duration or Timestamp_
+_**Data type:** Duration | Time_
{{% note %}}
-Flux only honors [RFC3339 timestamps](/v2.0/reference/flux/language/types#timestamp-format)
-and ignores dates and times provided in other formats.
+Time values in Flux must be in [RFC3339 format](/v2.0/reference/flux/language/types#timestamp-format).
{{% /note %}}
## Examples
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/rename.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/rename.md
similarity index 82%
rename from content/v2.0/reference/flux/functions/built-in/transformations/rename.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/rename.md
index d932cda35..57e3c504f 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/rename.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/rename.md
@@ -3,6 +3,7 @@ title: rename() function
description: The `rename()` function renames specified columns in a table.
aliases:
- /v2.0/reference/flux/functions/transformations/rename
+ - /v2.0/reference/flux/functions/built-in/transformations/rename/
menu:
v2_0_ref:
name: rename
@@ -30,13 +31,19 @@ rename(fn: (column) => "{column}_new")
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### columns
+
A map of columns to rename and their corresponding new names.
Cannot be used with `fn`.
_**Data type:** Object_
### fn
+
A function mapping between old and new column names.
Cannot be used with `columns`.
@@ -45,6 +52,7 @@ _**Data type:** Function_
## Examples
##### Rename a single column
+
```js
from(bucket: "example-bucket")
|> range(start: -5m)
@@ -52,6 +60,7 @@ from(bucket: "example-bucket")
```
##### Rename all columns using a function
+
```js
from(bucket: "example-bucket")
|> range(start: -5m)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/_index.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/_index.md
similarity index 78%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/_index.md
index cf67e6d44..62cbf6955 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/_index.md
@@ -4,6 +4,7 @@ list_title: Built-in selector functions
description: Flux's built-in selector functions return one or more records based on function logic.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/
menu:
v2_0_ref:
parent: built-in-transformations
@@ -25,5 +26,5 @@ The following selector functions are available:
The following functions can be used as both selectors or aggregates, but they are
categorized as aggregate functions in this documentation:
-- [median](/v2.0/reference/flux/functions/built-in/transformations/aggregates/median)
-- [quantile](/v2.0/reference/flux/functions/built-in/transformations/aggregates/quantile)
+- [median](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/median)
+- [quantile](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/quantile)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/bottom.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/bottom.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/bottom.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/bottom.md
index 0e82501ab..cb50c166e 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/bottom.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/bottom.md
@@ -3,6 +3,7 @@ title: bottom() function
description: The `bottom()` function sorts a table by columns and keeps only the bottom n records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/bottom
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/bottom/
menu:
v2_0_ref:
name: bottom
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/distinct.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/distinct.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/distinct.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/distinct.md
index 945db3889..a999c3dca 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/distinct.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/distinct.md
@@ -3,6 +3,7 @@ title: distinct() function
description: The `distinct()` function returns the unique values for a given column.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/distinct
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/distinct/
menu:
v2_0_ref:
name: distinct
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/first.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/first.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/first.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/first.md
index 5767015ca..b2d80c8fd 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/first.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/first.md
@@ -3,6 +3,7 @@ title: first() function
description: The `first()` function selects the first non-null record from an input table.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/first
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/first/
menu:
v2_0_ref:
name: first
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestaverage.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestaverage.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestaverage.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestaverage.md
index 054271bdd..7cf9d502c 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestaverage.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestaverage.md
@@ -3,6 +3,7 @@ title: highestAverage() function
description: The `highestAverage()` function calculates the average of each table in the input stream returns the top `n` records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/highestaverage
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/highestaverage/
menu:
v2_0_ref:
name: highestAverage
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestcurrent.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestcurrent.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestcurrent.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestcurrent.md
index 3a59ce712..ca0a922b0 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestcurrent.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestcurrent.md
@@ -3,6 +3,7 @@ title: highestCurrent() function
description: The `highestCurrent()` function selects the last record of each table in the input stream and returns the top `n` records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/highestcurrent
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/highestcurrent/
menu:
v2_0_ref:
name: highestCurrent
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestmax.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestmax.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestmax.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestmax.md
index 40dd14f26..e9268edcf 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/highestmax.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/highestmax.md
@@ -3,6 +3,7 @@ title: highestMax() function
description: The `highestMax()` function selects the maximum record from each table in the input stream and returns the top `n` records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/highestmax
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/highestmax/
menu:
v2_0_ref:
name: highestMax
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/last.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/last.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/last.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/last.md
index 83d7c3e96..52a11a4aa 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/last.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/last.md
@@ -3,6 +3,7 @@ title: last() function
description: The `last()` function selects the last non-null record from an input table.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/last
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/last/
menu:
v2_0_ref:
name: last
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestaverage.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestaverage.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestaverage.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestaverage.md
index 9020333d3..d1ea9fe7c 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestaverage.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestaverage.md
@@ -3,6 +3,7 @@ title: lowestAverage() function
description: The `lowestAverage()` function calculates the average of each table in the input stream and returns the lowest `n` records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/lowestaverage
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/lowestaverage/
menu:
v2_0_ref:
name: lowestAverage
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestcurrent.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestcurrent.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestcurrent.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestcurrent.md
index 0b2855df5..eab20a443 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestcurrent.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestcurrent.md
@@ -3,6 +3,7 @@ title: lowestCurrent() function
description: The `lowestCurrent()` function selects the last record of each table in the input stream and returns the lowest `n` records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/lowestcurrent
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/lowestcurrent/
menu:
v2_0_ref:
name: lowestCurrent
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestmin.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestmin.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestmin.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestmin.md
index f8b1319cf..8a0cdc57a 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/lowestmin.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/lowestmin.md
@@ -3,6 +3,7 @@ title: lowestMin() function
description: The `lowestMin()` function selects the minimum record from each table in the input stream and returns the lowest `n` records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/lowestmin
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/lowestmin/
menu:
v2_0_ref:
name: lowestMin
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/max.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/max.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/max.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/max.md
index 3069e2393..9ec939844 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/max.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/max.md
@@ -2,7 +2,8 @@
title: max() function
description: The `max()` function selects record with the highest _value from the input table.
aliases:
- - /v2.0/reference/flux/functions/transformations/selectors/max
+ - /v2.0/reference/flux/functions/transformations/selectors/max
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/max/
menu:
v2_0_ref:
name: max
@@ -41,4 +42,4 @@ from(bucket:"example-bucket")
##### Related InfluxQL functions and statements:
-[MAX()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#max)
+[MAX()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#max)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/min.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/min.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/min.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/min.md
index 24ea3c573..28c3ba794 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/min.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/min.md
@@ -3,6 +3,7 @@ title: min() function
description: The `min()` function selects record with the lowest _value from the input table.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/min
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/min/
menu:
v2_0_ref:
name: min
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/sample.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/sample.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/sample.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/sample.md
index 2bcc6ae83..7e6f9a7f5 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/sample.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/sample.md
@@ -3,6 +3,7 @@ title: sample() function
description: The `sample()` function selects a subset of the records from the input table.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/sample
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/sample/
menu:
v2_0_ref:
name: sample
@@ -48,4 +49,4 @@ from(bucket:"example-bucket")
##### Related InfluxQL functions and statements:
-[SAMPLE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#sample)
+[SAMPLE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#sample)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/top.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/top.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/top.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/top.md
index 87ffbed73..630bf8f4e 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/top.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/top.md
@@ -3,6 +3,7 @@ title: top() function
description: The `top()` function sorts a table by columns and keeps only the top n records.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/top
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/top/
menu:
v2_0_ref:
name: top
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/unique.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/unique.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/built-in/transformations/selectors/unique.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/unique.md
index fe5047a7f..899ca0bd2 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/selectors/unique.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/selectors/unique.md
@@ -3,6 +3,7 @@ title: unique() function
description: The `unique()` function returns all records containing unique values in a specified column.
aliases:
- /v2.0/reference/flux/functions/transformations/selectors/unique
+ - /v2.0/reference/flux/functions/built-in/transformations/selectors/unique/
menu:
v2_0_ref:
name: unique
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/set.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/set.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/built-in/transformations/set.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/set.md
index 51e88830d..812bc21b0 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/set.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/set.md
@@ -3,6 +3,7 @@ title: set() function
description: The `set()` function assigns a static value to each record in the input table.
aliases:
- /v2.0/reference/flux/functions/transformations/set
+ - /v2.0/reference/flux/functions/built-in/transformations/set/
menu:
v2_0_ref:
name: set
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/sort.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/sort.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/sort.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/sort.md
index 679885b53..a4a6f4e1d 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/sort.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/sort.md
@@ -3,6 +3,7 @@ title: sort() function
description: The `sort()` function orders the records within each table.
aliases:
- /v2.0/reference/flux/functions/transformations/sort
+ - /v2.0/reference/flux/functions/built-in/transformations/sort/
menu:
v2_0_ref:
name: sort
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/statecount.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/statecount.md
similarity index 84%
rename from content/v2.0/reference/flux/functions/built-in/transformations/statecount.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/statecount.md
index e121db236..58966ae86 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/statecount.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/statecount.md
@@ -3,6 +3,7 @@ title: stateCount() function
description: The `stateCount()` function computes the number of consecutive records in a given state.
aliases:
- /v2.0/reference/flux/functions/transformations/statecount
+ - /v2.0/reference/flux/functions/built-in/transformations/statecount/
menu:
v2_0_ref:
name: stateCount
@@ -28,7 +29,12 @@ and does not affect the state count._
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### fn
+
A single argument function that evaluates true or false to identify the state of the record.
Records are passed to the function.
Those that evaluate to `true` increment the state count.
@@ -37,11 +43,13 @@ Those that evaluate to `false` reset the state count.
_**Data type:** Function_
### column
+
The name of the column added to each record that contains the incremented state count.
_**Data type:** String_
## Examples
+
```js
from("monitor/autogen")
|> range(start: -1h)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/stateduration.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/stateduration.md
similarity index 85%
rename from content/v2.0/reference/flux/functions/built-in/transformations/stateduration.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/stateduration.md
index 6eb8c6e8f..fb62eb1d8 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/stateduration.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/stateduration.md
@@ -3,6 +3,7 @@ title: stateDuration() function
description: The `stateDuration()` function computes the duration of a given state.
aliases:
- /v2.0/reference/flux/functions/transformations/stateduration
+ - /v2.0/reference/flux/functions/built-in/transformations/stateduration/
menu:
v2_0_ref:
name: stateDuration
@@ -34,7 +35,12 @@ and does not affect the state duration._
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter. To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### fn
+
A single argument function that evaluates true or false to identify the state of the record.
Records are passed to the function.
Those that evaluate to `true` increment the state duration.
@@ -43,17 +49,21 @@ Those that evaluate to `false` reset the state duration.
_**Data type:** Function_
### column
+
The name of the column added to each record that contains the state duration.
_**Data type:** String_
### unit
+
The unit of time in which the state duration is incremented.
For example: `1s`, `1m`, `1h`, etc.
+The default unit is one second (`1s`).
_**Data type:** Duration_
## Examples
+
```js
from("monitor/autogen")
|> range(start: -1h)
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/_index.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/_index.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/built-in/transformations/stream-table/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/_index.md
index 9206961fd..c1c0f1382 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/_index.md
@@ -5,6 +5,8 @@ seotitle: Flux built-in stream and table functions
description: >
Use stream and table functions to extract a table from a stream of tables and access its
columns and records.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/stream-table/
weight: 401
menu:
v2_0_ref:
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/getcolumn.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getcolumn.md
similarity index 69%
rename from content/v2.0/reference/flux/functions/built-in/transformations/stream-table/getcolumn.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getcolumn.md
index 2bc462efb..cbc976099 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/getcolumn.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getcolumn.md
@@ -3,6 +3,8 @@ title: getColumn() function
description: >
The `getColumn()` function extracts a column from a table given its label.
If the label is not present in the set of columns, the function errors.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/stream-table/getcolumn/
menu:
v2_0_ref:
name: getColumn
@@ -19,6 +21,13 @@ _**Function type:** Stream and table_
getColumn(column: "_value")
```
+{{% note %}}
+#### Use tableFind() to extract a single table
+`getColumn()` requires a single table as input.
+Use [`tableFind()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind/)
+to extract a single table from a stream of tables.
+{{% /note %}}
+
## Parameters
### column
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/getrecord.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getrecord.md
similarity index 69%
rename from content/v2.0/reference/flux/functions/built-in/transformations/stream-table/getrecord.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getrecord.md
index 9e1f35826..f660be331 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/getrecord.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getrecord.md
@@ -3,6 +3,8 @@ title: getRecord() function
description: >
The `getRecord()` function extracts a record from a table given its index.
If the index is out of bounds, the function errors.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/stream-table/getrecord/
menu:
v2_0_ref:
name: getRecord
@@ -19,6 +21,13 @@ _**Function type:** Stream and table_
getRecord(idx: 0)
```
+{{% note %}}
+#### Use tableFind() to extract a single table
+`getRecord()` requires a single table as input.
+Use [`tableFind()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind/)
+to extract a single table from a stream of tables.
+{{% /note %}}
+
## Parameters
### idx
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/tablefind.md
similarity index 69%
rename from content/v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/tablefind.md
index dc4de72cd..409ea3dbc 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/tablefind.md
@@ -3,6 +3,8 @@ title: tableFind() function
description: >
The `tableFind()` function extracts the first table in a stream of tables whose
group key values match a predicate. If no table is found, the function errors.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind/
menu:
v2_0_ref:
name: tableFind
@@ -16,12 +18,18 @@ group key values match a predicate. If no table is found, the function errors.
_**Function type:** Stream and table_
```js
-tableFind(column: "_value")
+tableFind(fn: (key) => key._field == "fieldName")
```
## Parameters
+{{% note %}}
+Make sure `fn` parameter names match each specified parameter.
+To learn why, see [Match parameter names](/v2.0/reference/flux/language/data-model/#match-parameter-names).
+{{% /note %}}
+
### fn
+
A predicate function for matching keys in a table's group key.
`tableFind` returns the first table that resolves as `true`.
It expects a `key` argument which represents a group key in the input stream.
@@ -29,11 +37,13 @@ It expects a `key` argument which represents a group key in the input stream.
_**Data type:** Function_
##### Example fn function
+
```js
(key) => key._field == "fieldName"
```
## Example
+
```js
t = from(bucket:"example-bucket")
|> range(start: -5m)
@@ -45,6 +55,6 @@ t = from(bucket:"example-bucket")
```
{{% note %}}
-You can use `t` from the example above as input for [`getColumn()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/getcolumn/)
-and [`getRecord()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/getrecord/).
+You can use `t` from the example above as input for [`getColumn()`](/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getcolumn/)
+and [`getRecord()`](/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getrecord/).
{{% /note %}}
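+
+For example, a minimal sketch (using the `t` variable defined above) that extracts
+an array of values from the `_value` column:
+
+```js
+t
+  |> getColumn(column: "_value")
+
+// Returns an array of _value column values
+```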
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/tail.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/tail.md
new file mode 100644
index 000000000..b86287b1b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/tail.md
@@ -0,0 +1,49 @@
+---
+title: tail() function
+description: The `tail()` function limits each output table to the last `n` records.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/tail/
+menu:
+ v2_0_ref:
+ name: tail
+ parent: built-in-transformations
+weight: 401
+related:
+  - /v2.0/reference/flux/stdlib/built-in/transformations/limit/
+---
+
+The `tail()` function limits each output table to the last [`n`](#n) records.
+The function produces one output table for each input table.
+Each output table contains the last `n` records before the [`offset`](#offset).
+If the input table has fewer than `offset + n` records, `tail()` outputs all records before the `offset`.
+
+_**Function type:** Filter_
+
+```js
+tail(
+ n:10,
+ offset: 0
+)
+```
+
+## Parameters
+
+### n
+The maximum number of records to output.
+
+_**Data type:** Integer_
+
+### offset
+The number of records to skip at the end of a table before limiting to `n`.
+Defaults to `0`.
+
+_**Data type:** Integer_
+
+## Examples
+
+##### Output the last ten records in each table
+```js
+from(bucket:"example-bucket")
+ |> range(start:-1h)
+ |> tail(n:10)
+```
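+
+##### Output ten records, skipping the five most recent
+A sketch that uses the [`offset`](#offset) parameter to skip the five most recent
+records in each table and return the ten records before them:
+
+```js
+from(bucket:"example-bucket")
+  |> range(start:-1h)
+  |> tail(n:10, offset: 5)
+```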
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/timeshift.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/timeshift.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/built-in/transformations/timeshift.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/timeshift.md
index a936dacaa..be365a397 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/timeshift.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/timeshift.md
@@ -4,6 +4,7 @@ description: The `timeShift()` function adds a fixed duration to time columns.
aliases:
- /v2.0/reference/flux/functions/transformations/shift
- /v2.0/reference/flux/functions/built-in/transformations/shift
+ - /v2.0/reference/flux/functions/built-in/transformations/timeshift/
menu:
v2_0_ref:
name: timeShift
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/truncatetimecolumn.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/truncatetimecolumn.md
new file mode 100644
index 000000000..6b0a0b053
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/truncatetimecolumn.md
@@ -0,0 +1,55 @@
+---
+title: truncateTimeColumn() function
+description: >
+ The `truncateTimeColumn()` function truncates all input table `_time` values to a specified unit.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/truncatetimecolumn/
+menu:
+ v2_0_ref:
+ name: truncateTimeColumn
+ parent: built-in-transformations
+weight: 401
+related:
+ - /v2.0/reference/flux/stdlib/date/truncate/
+---
+
+The `truncateTimeColumn()` function truncates all input table `_time` values to a specified unit.
+
+_**Function type:** Transformation_
+
+```js
+truncateTimeColumn(unit: 1s)
+```
+
+## Parameters
+
+### unit
+The unit of time to truncate to.
+
+_**Data type:** Duration_
+
+{{% note %}}
+Only use `1` and the unit of time to specify the `unit`.
+For example: `1s`, `1m`, `1h`.
+{{% /note %}}
+
+## Examples
+
+##### Truncate all time values to seconds
+```js
+from(bucket:"example-bucket")
+ |> range(start:-1h)
+ |> truncateTimeColumn(unit: 1s)
+```
+
+## Function definition
+```js
+import "date"
+
+truncateTimeColumn = (unit, tables=<-) =>
+ tables
+ |> map(fn: (r) => ({
+ r with _time: date.truncate(t: r._time, unit:unit)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/_index.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/_index.md
similarity index 89%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/_index.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/_index.md
index 32dc3ad41..713e7c120 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/_index.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/_index.md
@@ -4,6 +4,7 @@ list_title: Built-in type conversion functions
description: Flux's built-in type conversion functions convert columns of the input table into a specific data type.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/
menu:
v2_0_ref:
parent: built-in-transformations
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/bool.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bool.md
similarity index 65%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/bool.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bool.md
index 6ae7d84ae..03935abee 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/bool.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bool.md
@@ -1,6 +1,8 @@
---
title: bool() function
description: The `bool()` function converts a single value to a boolean.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/bool/
menu:
v2_0_ref:
name: bool
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "system" and
- )
- |> map(fn:(r) => bool(v: r.responsive))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "system" )
+ |> map(fn:(r) => ({ r with responsive: bool(v: r.responsive) }))
```
diff --git a/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bytes.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bytes.md
new file mode 100644
index 000000000..a3707bb88
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bytes.md
@@ -0,0 +1,32 @@
+---
+title: bytes() function
+description: The `bytes()` function converts a single value to bytes.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/bytes/
+menu:
+ v2_0_ref:
+ name: bytes
+ parent: built-in-type-conversions
+weight: 502
+---
+
+The `bytes()` function converts a single value to bytes.
+
+_**Function type:** Type conversion_
+_**Output data type:** Bytes_
+
+```js
+bytes(v: "1m")
+```
+
+## Parameters
+
+### v
+The value to convert.
+
+## Examples
+```js
+from(bucket: "sensor-data")
+ |> range(start: -1m)
+ |> map(fn:(r) => ({ r with _value: bytes(v: r._value) }))
+```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/duration.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/duration.md
similarity index 66%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/duration.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/duration.md
index 4d17477cc..bb52c0560 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/duration.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/duration.md
@@ -1,6 +1,8 @@
---
title: duration() function
description: The `duration()` function converts a single value to a duration.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/duration/
menu:
v2_0_ref:
name: duration
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "system" and
- )
- |> map(fn:(r) => duration(v: r.uptime))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "system" )
+ |> map(fn:(r) => ({ r with uptime: duration(v: r.uptime) }))
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/float.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/float.md
similarity index 65%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/float.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/float.md
index 0c27345be..30e54c5b0 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/float.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/float.md
@@ -1,6 +1,8 @@
---
title: float() function
description: The `float()` function converts a single value to a float.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/float/
menu:
v2_0_ref:
name: float
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "camera" and
- )
- |> map(fn:(r) => float(v: r.aperature))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "camera" )
+  |> map(fn:(r) => ({ r with aperture: float(v: r.aperture) }))
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/int.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/int.md
similarity index 65%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/int.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/int.md
index 1a8026634..a0da92ec4 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/int.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/int.md
@@ -1,6 +1,8 @@
---
title: int() function
description: The `int()` function converts a single value to an integer.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/int/
menu:
v2_0_ref:
name: int
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "camera" and
- )
- |> map(fn:(r) => int(v: r.exposures))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "camera" )
+ |> map(fn:(r) => ({ r with exposures: int(v: r.exposures) }))
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/string.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string.md
similarity index 65%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/string.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string.md
index 3eaf9e2c6..d8c3648bf 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/string.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string.md
@@ -1,6 +1,8 @@
---
title: string() function
description: The `string()` function converts a single value to a string.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/string/
menu:
v2_0_ref:
name: string
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "system" and
- )
- |> map(fn:(r) => string(v: r.model_number))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "system" )
+ |> map(fn:(r) => ({ r with model_number: string(v: r.model_number) }))
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/time.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/time.md
similarity index 66%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/time.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/time.md
index 98046b56c..bcaa3fcaa 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/time.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/time.md
@@ -1,6 +1,8 @@
---
title: time() function
description: The `time()` function converts a single value to a time.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/time/
menu:
v2_0_ref:
name: time
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "system" and
- )
- |> map(fn:(r) => time(v: r.timestamp))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "system" )
+ |> map(fn:(r) => ({ r with timestamp: time(v: r.timestamp) }))
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tobool.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tobool.md
similarity index 71%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tobool.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tobool.md
index aaa7ef658..2cad528d3 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tobool.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tobool.md
@@ -3,6 +3,7 @@ title: toBool() function
description: The `toBool()` function converts all values in the `_value` column to booleans.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/tobool
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/tobool/
menu:
v2_0_ref:
name: toBool
@@ -22,7 +23,7 @@ toBool()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `bool()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +40,9 @@ from(bucket: "telegraf")
```js
toBool = (tables=<-) =>
tables
- |> map(fn:(r) => bool(v: r._value))
+ |> map(fn:(r) => ({ r with _value: bool(v: r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[bool()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/bool)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[bool()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/bool)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/toduration.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/toduration.md
similarity index 67%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/toduration.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/toduration.md
index c8f22aef7..6ffa8ad51 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/toduration.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/toduration.md
@@ -3,6 +3,7 @@ title: toDuration() function
description: The `toDuration()` function converts all values in the `_value` column to durations.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/toduration
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/toduration/
menu:
v2_0_ref:
name: toDuration
@@ -10,6 +11,10 @@ menu:
weight: 501
---
+{{% warn %}}
+**`toDuration()` was removed in Flux 0.37.**
+{{% /warn %}}
+
The `toDuration()` function converts all values in the `_value` column to durations.
_**Function type:** Type conversion_
@@ -22,7 +27,7 @@ toDuration()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `duration()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +44,9 @@ from(bucket: "telegraf")
```js
toDuration = (tables=<-) =>
tables
- |> map(fn:(r) => duration(v: r._value))
+ |> map(fn:(r) => ({ r with _value: duration(v: r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[duration()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/duration)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[duration()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/duration)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tofloat.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tofloat.md
similarity index 71%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tofloat.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tofloat.md
index 689209f11..cabaf654b 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tofloat.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tofloat.md
@@ -3,6 +3,7 @@ title: toFloat() function
description: The `toFloat()` function converts all values in the `_value` column to floats.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/tofloat
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/tofloat/
menu:
v2_0_ref:
name: toFloat
@@ -22,7 +23,7 @@ toFloat()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `float()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +40,9 @@ from(bucket: "telegraf")
```js
toFloat = (tables=<-) =>
tables
- |> map(fn:(r) => float(v: r._value))
+ |> map(fn:(r) => ({ r with _value: float(v: r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[float()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/float)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[float()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/float)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/toint.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/toint.md
similarity index 71%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/toint.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/toint.md
index 1731712d4..4053d716b 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/toint.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/toint.md
@@ -3,6 +3,7 @@ title: toInt() function
description: The `toInt()` function converts all values in the `_value` column to integers.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/toint
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/toint/
menu:
v2_0_ref:
name: toInt
@@ -22,7 +23,7 @@ toInt()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `int()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +40,9 @@ from(bucket: "telegraf")
```js
toInt = (tables=<-) =>
tables
- |> map(fn:(r) => int(v: r._value))
+ |> map(fn:(r) => ({ r with _value: int(v: r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[int()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/int)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[int()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/int)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tostring.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tostring.md
similarity index 71%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tostring.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tostring.md
index f6be3607f..99645fa1e 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/tostring.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/tostring.md
@@ -3,6 +3,7 @@ title: toString() function
description: The `toString()` function converts all values in the `_value` column to strings.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/tostring
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/tostring/
menu:
v2_0_ref:
name: toString
@@ -22,7 +23,7 @@ toString()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `string()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +40,9 @@ from(bucket: "telegraf")
```js
toString = (tables=<-) =>
tables
- |> map(fn:(r) => string(v: r._value))
+ |> map(fn:(r) => ({ r with _value: string(v: r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[string()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/string)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[string()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/string)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/totime.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/totime.md
similarity index 71%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/totime.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/totime.md
index e4d4b6939..ea36402ee 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/totime.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/totime.md
@@ -3,6 +3,7 @@ title: toTime() function
description: The `toTime()` function converts all values in the `_value` column to times.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/totime
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/totime/
menu:
v2_0_ref:
name: toTime
@@ -22,7 +23,7 @@ toTime()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `time()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +40,9 @@ from(bucket: "telegraf")
```js
toTime = (tables=<-) =>
tables
- |> map(fn:(r) => time(v:r._value))
+ |> map(fn:(r) => ({ r with _value: time(v:r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[time()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/time)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[time()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/time)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/touint.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/touint.md
similarity index 71%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/touint.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/touint.md
index 4d5e5f990..d113d28a1 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/touint.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/touint.md
@@ -3,6 +3,7 @@ title: toUInt() function
description: The `toUInt()` function converts all values in the `_value` column to UIntegers.
aliases:
- /v2.0/reference/flux/functions/transformations/type-conversions/touint
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/touint/
menu:
v2_0_ref:
name: toUInt
@@ -22,7 +23,7 @@ toUInt()
{{% note %}}
To convert values in a column other than `_value`, define a custom function
patterned after the [function definition](#function-definition),
-but replace the column in the `uint()` function with your desired column.
+but replace `_value` with your desired column.
{{% /note %}}
## Examples
@@ -39,9 +40,9 @@ from(bucket: "telegraf")
```js
toUInt = (tables=<-) =>
tables
- |> map(fn:(r) => uint(v:r._value))
+ |> map(fn:(r) => ({ r with _value: uint(v:r._value) }))
```
_**Used functions:**
-[map()](/v2.0/reference/flux/functions/built-in/transformations/map),
-[uint()](/v2.0/reference/flux/functions/built-in/transformations/type-conversions/uint)_
+[map()](/v2.0/reference/flux/stdlib/built-in/transformations/map),
+[uint()](/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/uint)_
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/uint.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/uint.md
similarity index 65%
rename from content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/uint.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/uint.md
index a7eb92da6..72f12c2d0 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/type-conversions/uint.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/type-conversions/uint.md
@@ -1,6 +1,8 @@
---
title: uint() function
description: The `uint()` function converts a single value to a UInteger.
+aliases:
+ - /v2.0/reference/flux/functions/built-in/transformations/type-conversions/uint/
menu:
v2_0_ref:
name: uint
@@ -25,8 +27,7 @@ The value to convert.
## Examples
```js
from(bucket: "sensor-data")
- |> filter(fn:(r) =>
- r._measurement == "camera" and
- )
- |> map(fn:(r) => uint(v: r.exposures))
+ |> range(start: -1m)
+ |> filter(fn:(r) => r._measurement == "camera" )
+ |> map(fn:(r) => ({ r with exposures: uint(v: r.exposures) }))
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/union.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/union.md
similarity index 62%
rename from content/v2.0/reference/flux/functions/built-in/transformations/union.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/union.md
index 449e15ee6..96e60030a 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/union.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/union.md
@@ -3,6 +3,7 @@ title: union() function
description: The `union()` function concatenates two or more input streams into a single output stream.
aliases:
- /v2.0/reference/flux/functions/transformations/union
+ - /v2.0/reference/flux/functions/built-in/transformations/union/
menu:
v2_0_ref:
name: union
@@ -17,11 +18,11 @@ The output schema of the `union()` function is the union of all input schemas.
`union()` does not preserve the sort order of the rows within tables.
A sort operation may be added if a specific sort order is needed.
-_**Function type:** Transformation_
+_**Function type:** Transformation_
_**Output data type:** Object_
```js
-union(tables: ["table1", "table2"])
+union(tables: [table1, table2])
```
## Parameters
@@ -34,21 +35,13 @@ _**Data type:** Array of streams_
## Examples
```js
-left = from(bucket: "test")
- |> range(start: 2018-05-22T19:53:00Z, stop: 2018-05-22T19:53:50Z)
- |> filter(fn: (r) =>
- r._field == "usage_guest" or
- r._field == "usage_guest_nice"
- )
- |> drop(columns: ["_start", "_stop"])
+bucket1 = from(bucket: "example-bucket-1")
+ |> range(start: -5m)
+ |> filter(fn: (r) => r._field == "usage_guest" or r._field == "usage_guest_nice")
-right = from(bucket: "test")
- |> range(start: 2018-05-22T19:53:50Z, stop: 2018-05-22T19:54:20Z)
- |> filter(fn: (r) =>
- r._field == "usage_guest" or
- r._field == "usage_idle"
- )
- |> drop(columns: ["_start", "_stop"])
+bucket2 = from(bucket: "example-bucket-2")
+ |> range(start: -5m)
+ |> filter(fn: (r) => r._field == "usage_guest" or r._field == "usage_idle")
-union(tables: [left, right])
+union(tables: [bucket1, bucket2])
```
diff --git a/content/v2.0/reference/flux/functions/built-in/transformations/window.md b/content/v2.0/reference/flux/stdlib/built-in/transformations/window.md
similarity index 97%
rename from content/v2.0/reference/flux/functions/built-in/transformations/window.md
rename to content/v2.0/reference/flux/stdlib/built-in/transformations/window.md
index bd9f658aa..6611b35a6 100644
--- a/content/v2.0/reference/flux/functions/built-in/transformations/window.md
+++ b/content/v2.0/reference/flux/stdlib/built-in/transformations/window.md
@@ -3,6 +3,7 @@ title: window() function
description: The `window()` function groups records based on a time value.
aliases:
- /v2.0/reference/flux/functions/transformations/window
+ - /v2.0/reference/flux/functions/built-in/transformations/window/
menu:
v2_0_ref:
name: window
diff --git a/content/v2.0/reference/flux/functions/csv/_index.md b/content/v2.0/reference/flux/stdlib/csv/_index.md
similarity index 86%
rename from content/v2.0/reference/flux/functions/csv/_index.md
rename to content/v2.0/reference/flux/stdlib/csv/_index.md
index f1af202be..e8ebc3e22 100644
--- a/content/v2.0/reference/flux/functions/csv/_index.md
+++ b/content/v2.0/reference/flux/stdlib/csv/_index.md
@@ -4,10 +4,12 @@ list_title: CSV package
description: >
The Flux CSV package provides functions for working with data in annotated CSV format.
Import the `csv` package.
+aliases:
+ - /v2.0/reference/flux/functions/csv/
menu:
v2_0_ref:
name: CSV
- parent: Flux packages and functions
+ parent: Flux standard library
weight: 202
v2.0/tags: [functions, csv, package]
---
diff --git a/content/v2.0/reference/flux/functions/csv/from.md b/content/v2.0/reference/flux/stdlib/csv/from.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/csv/from.md
rename to content/v2.0/reference/flux/stdlib/csv/from.md
index 712555dcd..ea6c16816 100644
--- a/content/v2.0/reference/flux/functions/csv/from.md
+++ b/content/v2.0/reference/flux/stdlib/csv/from.md
@@ -4,6 +4,7 @@ description: The `csv.from()` function retrieves data from a CSV data source.
aliases:
- /v2.0/reference/flux/functions/inputs/fromcsv
- /v2.0/reference/flux/functions/built-in/inputs/fromcsv
+ - /v2.0/reference/flux/functions/csv/from/
menu:
v2_0_ref:
name: csv.from
@@ -28,10 +29,6 @@ csv.from(file: "/path/to/data-file.csv")
csv.from(csv: csvData)
```
-{{% cloud-msg %}}
-`csv.from()` is not available in {{< cloud-name "short" >}}.
-{{% /cloud-msg %}}
-
## Parameters
### file
@@ -40,6 +37,10 @@ The path can be absolute or relative.
If relative, it is relative to the working directory of the `influxd` process.
_The CSV file must exist in the same file system running the `influxd` process._
+{{% cloud-msg %}}
+{{< cloud-name "short" >}} does not support the `file` parameter.
+{{% /cloud-msg %}}
+
_**Data type:** String_
### csv
diff --git a/content/v2.0/reference/flux/stdlib/date/_index.md b/content/v2.0/reference/flux/stdlib/date/_index.md
new file mode 100644
index 000000000..0d603df8b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/_index.md
@@ -0,0 +1,60 @@
+---
+title: Flux date package
+list_title: Date package
+description: >
+ The Flux date package provides date and time constants and functions.
+ Import the `date` package.
+aliases:
+ - /v2.0/reference/flux/language/built-ins/time-constants/
+ - /v2.0/reference/flux/functions/date/
+menu:
+ v2_0_ref:
+ name: Date
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [date, time, functions]
+---
+
+The Flux date package provides date and time constants and functions.
+Import the `date` package.
+
+```js
+import "date"
+```
+
+## Date and time constants
+The `date` package includes the following date and time constants.
+
+### Days of the week
+Days of the week are represented as integers in the range `[0-6]`.
+
+```js
+date.Sunday = 0
+date.Monday = 1
+date.Tuesday = 2
+date.Wednesday = 3
+date.Thursday = 4
+date.Friday = 5
+date.Saturday = 6
+```
+
+### Months of the year
+Months are represented as integers in the range `[1-12]`.
+
+```js
+date.January = 1
+date.February = 2
+date.March = 3
+date.April = 4
+date.May = 5
+date.June = 6
+date.July = 7
+date.August = 8
+date.September = 9
+date.October = 10
+date.November = 11
+date.December = 12
+```
+
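+For example, a quick sketch, assuming the `date.weekDay()` function documented later
+in this package, that combines these constants with a timestamp to check whether it
+falls on a given weekday:
+
+```js
+import "date"
+
+date.weekDay(t: 2019-07-17T12:05:21.012Z) == date.Wednesday
+
+// Returns true
+```
+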
+## Date and time functions
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/date/hour.md b/content/v2.0/reference/flux/stdlib/date/hour.md
new file mode 100644
index 000000000..efdf143e7
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/hour.md
@@ -0,0 +1,33 @@
+---
+title: date.hour() function
+description: >
+ The `date.hour()` function returns the hour of a specified time.
+ Results range from `[0-23]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/hour/
+menu:
+ v2_0_ref:
+ name: date.hour
+ parent: Date
+weight: 301
+---
+
+The `date.hour()` function returns the hour of a specified time.
+Results range from `[0-23]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.hour(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 12
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/microsecond.md b/content/v2.0/reference/flux/stdlib/date/microsecond.md
new file mode 100644
index 000000000..bd4cbba76
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/microsecond.md
@@ -0,0 +1,33 @@
+---
+title: date.microsecond() function
+description: >
+ The `date.microsecond()` function returns the microsecond of a specified time.
+ Results range from `[0-999999]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/microsecond/
+menu:
+ v2_0_ref:
+ name: date.microsecond
+ parent: Date
+weight: 301
+---
+
+The `date.microsecond()` function returns the microsecond of a specified time.
+Results range from `[0-999999]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.microsecond(t: 2019-07-17T12:05:21.012934584Z)
+
+// Returns 12934
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/millisecond.md b/content/v2.0/reference/flux/stdlib/date/millisecond.md
new file mode 100644
index 000000000..6d7a6285e
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/millisecond.md
@@ -0,0 +1,33 @@
+---
+title: date.millisecond() function
+description: >
+ The `date.millisecond()` function returns the millisecond of a specified time.
+  Results range from `[0-999]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/millisecond/
+menu:
+ v2_0_ref:
+ name: date.millisecond
+ parent: Date
+weight: 301
+---
+
+The `date.millisecond()` function returns the millisecond of a specified time.
+Results range from `[0-999]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.millisecond(t: 2019-07-17T12:05:21.012934584Z)
+
+// Returns 12
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/minute.md b/content/v2.0/reference/flux/stdlib/date/minute.md
new file mode 100644
index 000000000..5e5ec0b34
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/minute.md
@@ -0,0 +1,33 @@
+---
+title: date.minute() function
+description: >
+ The `date.minute()` function returns the minute of a specified time.
+ Results range from `[0-59]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/minute/
+menu:
+ v2_0_ref:
+ name: date.minute
+ parent: Date
+weight: 301
+---
+
+The `date.minute()` function returns the minute of a specified time.
+Results range from `[0-59]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.minute(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 5
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/month.md b/content/v2.0/reference/flux/stdlib/date/month.md
new file mode 100644
index 000000000..6612dedd4
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/month.md
@@ -0,0 +1,33 @@
+---
+title: date.month() function
+description: >
+ The `date.month()` function returns the month of a specified time.
+ Results range from `[1-12]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/month/
+menu:
+ v2_0_ref:
+ name: date.month
+ parent: Date
+weight: 301
+---
+
+The `date.month()` function returns the month of a specified time.
+Results range from `[1-12]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.month(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 7
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/monthday.md b/content/v2.0/reference/flux/stdlib/date/monthday.md
new file mode 100644
index 000000000..fabc7d31b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/monthday.md
@@ -0,0 +1,33 @@
+---
+title: date.monthDay() function
+description: >
+ The `date.monthDay()` function returns the day of the month for a specified time.
+ Results range from `[1-31]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/monthday/
+menu:
+ v2_0_ref:
+ name: date.monthDay
+ parent: Date
+weight: 301
+---
+
+The `date.monthDay()` function returns the day of the month for a specified time.
+Results range from `[1-31]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.monthDay(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 17
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/nanosecond.md b/content/v2.0/reference/flux/stdlib/date/nanosecond.md
new file mode 100644
index 000000000..9d05b1f72
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/nanosecond.md
@@ -0,0 +1,33 @@
+---
+title: date.nanosecond() function
+description: >
+ The `date.nanosecond()` function returns the nanosecond of a specified time.
+ Results range from `[0-999999999]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/nanosecond/
+menu:
+ v2_0_ref:
+ name: date.nanosecond
+ parent: Date
+weight: 301
+---
+
+The `date.nanosecond()` function returns the nanosecond of a specified time.
+Results range from `[0-999999999]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.nanosecond(t: 2019-07-17T12:05:21.012934584Z)
+
+// Returns 12934584
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/quarter.md b/content/v2.0/reference/flux/stdlib/date/quarter.md
new file mode 100644
index 000000000..a2f05b0f9
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/quarter.md
@@ -0,0 +1,33 @@
+---
+title: date.quarter() function
+description: >
+ The `date.quarter()` function returns the quarter of the year for a specified time.
+ Results range from `[1-4]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/quarter/
+menu:
+ v2_0_ref:
+ name: date.quarter
+ parent: Date
+weight: 301
+---
+
+The `date.quarter()` function returns the quarter of the year for a specified time.
+Results range from `[1-4]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.quarter(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 3
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/second.md b/content/v2.0/reference/flux/stdlib/date/second.md
new file mode 100644
index 000000000..e7cc28f7f
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/second.md
@@ -0,0 +1,33 @@
+---
+title: date.second() function
+description: >
+ The `date.second()` function returns the second of a specified time.
+ Results range from `[0-59]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/second/
+menu:
+ v2_0_ref:
+ name: date.second
+ parent: Date
+weight: 301
+---
+
+The `date.second()` function returns the second of a specified time.
+Results range from `[0-59]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.second(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 21
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/truncate.md b/content/v2.0/reference/flux/stdlib/date/truncate.md
new file mode 100644
index 000000000..762cfffcd
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/truncate.md
@@ -0,0 +1,59 @@
+---
+title: date.truncate() function
+description: >
+ The `date.truncate()` function truncates a time to a specified unit.
+aliases:
+ - /v2.0/reference/flux/functions/date/truncate/
+menu:
+ v2_0_ref:
+ name: date.truncate
+ parent: Date
+weight: 301
+---
+
+The `date.truncate()` function truncates a time to a specified unit.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.truncate(
+  t: 2019-07-17T12:05:21.012Z,
+ unit: 1s
+)
+
+// Returns 2019-07-17T12:05:21.000000000Z
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
+
+### unit
+The unit of time to truncate to.
+
+_**Data type:** Duration_
+
+{{% note %}}
+Only use `1` and the unit of time to specify the `unit`.
+For example: `1s`, `1m`, `1h`.
+{{% /note %}}
+
+## Examples
+```js
+import "date"
+
+date.truncate(t: "2019-06-03T13:59:01.000000000Z", unit: 1s)
+// Returns 2019-06-03T13:59:01.000000000Z
+
+date.truncate(t: "2019-06-03T13:59:01.000000000Z", unit: 1m)
+// Returns 2019-06-03T13:59:00.000000000Z
+
+date.truncate(t: "2019-06-03T13:59:01.000000000Z", unit: 1h)
+// Returns 2019-06-03T13:00:00.000000000Z
+
+```
diff --git a/content/v2.0/reference/flux/stdlib/date/week.md b/content/v2.0/reference/flux/stdlib/date/week.md
new file mode 100644
index 000000000..dec8bddec
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/week.md
@@ -0,0 +1,33 @@
+---
+title: date.week() function
+description: >
+ The `date.week()` function returns the ISO week of the year for a specified time.
+ Results range from `[1-53]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/week/
+menu:
+ v2_0_ref:
+ name: date.week
+ parent: Date
+weight: 301
+---
+
+The `date.week()` function returns the ISO week of the year for a specified time.
+Results range from `[1-53]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.week(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 29
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/weekday.md b/content/v2.0/reference/flux/stdlib/date/weekday.md
new file mode 100644
index 000000000..6e6fdb18c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/weekday.md
@@ -0,0 +1,33 @@
+---
+title: date.weekDay() function
+description: >
+ The `date.weekDay()` function returns the day of the week for a specified time.
+ Results range from `[0-6]`.
+aliases:
+ - /v2.0/reference/flux/functions/date/weekday/
+menu:
+ v2_0_ref:
+ name: date.weekDay
+ parent: Date
+weight: 301
+---
+
+The `date.weekDay()` function returns the day of the week for a specified time.
+Results range from `[0-6]`.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.weekDay(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 3
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/year.md b/content/v2.0/reference/flux/stdlib/date/year.md
new file mode 100644
index 000000000..f911a5976
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/year.md
@@ -0,0 +1,31 @@
+---
+title: date.year() function
+description: >
+ The `date.year()` function returns the year of a specified time.
+aliases:
+ - /v2.0/reference/flux/functions/date/year/
+menu:
+ v2_0_ref:
+ name: date.year
+ parent: Date
+weight: 301
+---
+
+The `date.year()` function returns the year of a specified time.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.year(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 2019
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/date/yearday.md b/content/v2.0/reference/flux/stdlib/date/yearday.md
new file mode 100644
index 000000000..20df57afe
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/date/yearday.md
@@ -0,0 +1,33 @@
+---
+title: date.yearDay() function
+description: >
+ The `date.yearDay()` function returns the day of the year for a specified time.
+ Results range from `[1-365]` for non-leap years, and `[1-366]` in leap years.
+aliases:
+ - /v2.0/reference/flux/functions/date/yearday/
+menu:
+ v2_0_ref:
+ name: date.yearDay
+ parent: Date
+weight: 301
+---
+
+The `date.yearDay()` function returns the day of the year for a specified time.
+Results range from `[1-365]` for non-leap years and `[1-366]` for leap years.
+
+_**Function type:** Transformation_
+
+```js
+import "date"
+
+date.yearDay(t: 2019-07-17T12:05:21.012Z)
+
+// Returns 198
+```
+
+## Parameters
+
+### t
+The time to operate on.
+
+_**Data type:** Time_
diff --git a/content/v2.0/reference/flux/stdlib/experimental/_index.md b/content/v2.0/reference/flux/stdlib/experimental/_index.md
new file mode 100644
index 000000000..678830f89
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/_index.md
@@ -0,0 +1,43 @@
+---
+title: Flux Experimental package
+list_title: Experimental package
+description: >
+ The Flux Experimental package includes experimental functions that perform various tasks.
+ Experimental functions are subject to change at any time and are not recommended for production use.
+menu:
+ v2_0_ref:
+ name: Experimental
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, experimental, package]
+---
+
+The Flux Experimental package includes experimental functions that perform various tasks.
+
+{{% warn %}}
+### Use experimental functions at your own risk
+Experimental functions are subject to change and are **not recommended for production use**.
+At any time, experimental functions and packages may:
+
+- be moved or promoted to a permanent location
+- undergo API changes
+- stop working with no planned fixes
+- be removed without warning or published explanation
+
+**By using experimental functions and packages, you agree to these risks.**
+{{% /warn %}}
+
+## Experimental functions
+The following functions are part of the base experimental package.
+To use them, import the `experimental` package.
+
+```js
+import "experimental"
+```
+
+{{< children type="functions" show="pages" >}}
+
+## Experimental packages
+Experimental packages require different import paths than base experimental functions.
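+For example, to use the experimental Prometheus functions documented below:
+
+```js
+import "experimental/prometheus"
+```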
+
+{{< children show="sections" >}}
diff --git a/content/v2.0/reference/flux/stdlib/experimental/addduration.md b/content/v2.0/reference/flux/stdlib/experimental/addduration.md
new file mode 100644
index 000000000..7db154248
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/addduration.md
@@ -0,0 +1,61 @@
+---
+title: experimental.addDuration() function
+description: >
+ The `experimental.addDuration()` function adds a duration to a time value and
+ returns the resulting time.
+menu:
+ v2_0_ref:
+ name: experimental.addDuration
+ parent: Experimental
+weight: 201
+related:
+ - /v2.0/reference/flux/stdlib/experimental/subduration/
+---
+
+The `experimental.addDuration()` function adds a duration to a time value and
+returns the resulting time value.
+
+_**Function type:** Transformation_
+
+{{% warn %}}
+The `experimental.addDuration()` function is subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+
+This specific function will be removed once duration vectors are implemented.
+See [influxdata/flux#413](https://github.com/influxdata/flux/issues/413).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.addDuration(
+ d: 12h,
+ to: now(),
+)
+```
+
+## Parameters
+
+### d
+The duration to add.
+
+_**Data type:** Duration_
+
+### to
+The time to add the [duration](#d) to.
+
+_**Data type:** Time_
+
+## Examples
+
+### Add six hours to a timestamp
+```js
+import "experimental"
+
+experimental.addDuration(
+ d: 6h,
+ to: 2019-09-16T12:00:00Z,
+)
+
+// Returns 2019-09-16T18:00:00.000000000Z
+```
diff --git a/content/v2.0/reference/flux/stdlib/experimental/bigtable/_index.md b/content/v2.0/reference/flux/stdlib/experimental/bigtable/_index.md
new file mode 100644
index 000000000..4063fc946
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/bigtable/_index.md
@@ -0,0 +1,29 @@
+---
+title: Flux Bigtable package
+list_title: Bigtable package
+description: >
+ The Flux Bigtable package provides tools for working with data in Google Cloud Bigtable databases.
+ Import the `experimental/bigtable` package.
+menu:
+ v2_0_ref:
+ name: Bigtable
+ parent: Experimental
+weight: 201
+v2.0/tags: [functions, bigtable, package, google]
+---
+
+The Flux Bigtable package provides tools for working with data in
+[Google Cloud Bigtable](https://cloud.google.com/bigtable/) databases.
+
+{{% warn %}}
+The Bigtable package is currently experimental and subject to change at any time.
+By using this package, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+Import the `experimental/bigtable` package:
+
+```js
+import "experimental/bigtable"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/experimental/bigtable/from.md b/content/v2.0/reference/flux/stdlib/experimental/bigtable/from.md
new file mode 100644
index 000000000..80cf01793
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/bigtable/from.md
@@ -0,0 +1,59 @@
+---
+title: bigtable.from() function
+description: >
+ The `bigtable.from()` function retrieves data from a Google Cloud Bigtable data source.
+menu:
+ v2_0_ref:
+ name: bigtable.from
+ parent: Bigtable
+weight: 301
+---
+
+The `bigtable.from()` function retrieves data from a [Google Cloud Bigtable](https://cloud.google.com/bigtable/)
+data source.
+
+_**Function type:** Input_
+
+{{% warn %}}
+The `bigtable.from()` function is currently experimental and subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental/bigtable"
+
+bigtable.from(
+ token: "mySuPeRseCretTokEn",
+ project: "exampleProjectID",
+ instance: "exampleInstanceID",
+ table: "example-table"
+)
+```
+
+## Parameters
+
+### token
+The Google Cloud IAM token to use to access the Cloud Bigtable database.
+
+_For more information, see the following:_
+
+- [Cloud Bigtable Access Control](https://cloud.google.com/bigtable/docs/access-control)
+- [Google Cloud IAM How-to guides](https://cloud.google.com/iam/docs/how-to)
+- [Setting Up Authentication for Server to Server Production Applications on Google Cloud](https://cloud.google.com/docs/authentication/production)
+
+_**Data type:** String_
+
+### project
+The project ID of the Cloud Bigtable project to retrieve data from.
+
+_**Data type:** String_
+
+### instance
+The instance ID of the Cloud Bigtable instance to retrieve data from.
+
+_**Data type:** String_
+
+### table
+The name of the Cloud Bigtable table to retrieve data from.
+
+_**Data type:** String_
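+
+## Examples
+
+### Query a limited number of rows from a Cloud Bigtable table
+The following is a minimal sketch; the token, project ID, instance ID, and table
+name are placeholders for illustration only.
+
+```js
+import "experimental/bigtable"
+
+bigtable.from(
+  token: "mySuPeRseCretTokEn",   // illustrative token
+  project: "exampleProjectID",   // illustrative project ID
+  instance: "exampleInstanceID", // illustrative instance ID
+  table: "example-table"         // illustrative table name
+)
+  |> limit(n: 10)
+```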
diff --git a/content/v2.0/reference/flux/stdlib/experimental/group.md b/content/v2.0/reference/flux/stdlib/experimental/group.md
new file mode 100644
index 000000000..d5d69f127
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/group.md
@@ -0,0 +1,61 @@
+---
+title: experimental.group() function
+description: >
+ The `experimental.group()` function introduces an `extend` mode to the existing
+ `group()` function.
+menu:
+ v2_0_ref:
+ name: experimental.group
+ parent: Experimental
+weight: 201
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/group/
+---
+
+The `experimental.group()` function introduces an `extend` mode to the existing
+[`group()`](/v2.0/reference/flux/stdlib/built-in/transformations/group/) function.
+
+_**Function type:** Transformation_
+
+{{% warn %}}
+The `experimental.group()` function is subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+
+This specific function will be removed once the proposed `extend` mode is sufficiently vetted.
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.group(columns: ["host", "_measurement"], mode:"extend")
+```
+
+## Parameters
+
+### columns
+List of columns to use in the grouping operation.
+Defaults to `[]`.
+
+_**Data type:** Array of strings_
+
+### mode
+The mode used to group columns.
+
+_**Data type:** String_
+
+{{% note %}}
+`extend` is the only mode available to `experimental.group()`.
+{{% /note %}}
+
+#### extend
+Appends columns defined in the [`columns` parameter](#columns) to all existing
+[group keys](/v2.0/query-data/get-started/#group-keys).
+
+## Examples
+
+###### Include the value column in each group's group key
+```js
+import "experimental"
+
+from(bucket: "example-bucket")
+  |> range(start: -1m)
+  |> experimental.group(columns: ["_value"], mode: "extend")
+```
diff --git a/content/v2.0/reference/flux/stdlib/experimental/mqtt/_index.md b/content/v2.0/reference/flux/stdlib/experimental/mqtt/_index.md
new file mode 100644
index 000000000..0154badc9
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/mqtt/_index.md
@@ -0,0 +1,28 @@
+---
+title: Flux MQTT package
+list_title: MQTT package
+description: >
+  The Flux MQTT package provides functions for working with the MQTT protocol.
+ Import the `experimental/mqtt` package.
+menu:
+ v2_0_ref:
+ name: MQTT
+ parent: Experimental
+weight: 201
+v2.0/tags: [functions, mqtt, package]
+---
+
+Flux MQTT functions provide tools for working with the Message Queuing Telemetry Transport (MQTT) protocol.
+
+{{% warn %}}
+The MQTT package is currently experimental and subject to change at any time.
+By using this package, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+Import the `experimental/mqtt` package:
+
+```js
+import "experimental/mqtt"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/experimental/mqtt/to.md b/content/v2.0/reference/flux/stdlib/experimental/mqtt/to.md
new file mode 100644
index 000000000..e90199f49
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/mqtt/to.md
@@ -0,0 +1,134 @@
+---
+title: mqtt.to() function
+description: >
+  The `mqtt.to()` function outputs data to an MQTT broker using the MQTT protocol.
+menu:
+ v2_0_ref:
+ name: mqtt.to
+ parent: MQTT
+weight: 301
+---
+
+The `mqtt.to()` function outputs data to an MQTT broker using the MQTT protocol.
+
+_**Function type:** Output_
+
+{{% warn %}}
+The `mqtt.to()` function is currently experimental and subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental/mqtt"
+
+mqtt.to(
+ broker: "tcp://localhost:8883",
+ topic: "example-topic",
+ message: "Example message",
+ qos: 0,
+ clientid: "flux-mqtt",
+ username: "username",
+ password: "password",
+ name: "name-example",
+ timeout: 1s,
+ timeColumn: "_time",
+ tagColumns: ["tag1", "tag2"],
+ valueColumns: ["_value"]
+)
+```
+
+## Parameters
+
+### broker
+The MQTT broker connection string.
+
+_**Data type:** String_
+
+### topic
+The MQTT topic to send data to.
+
+_**Data type:** String_
+
+### message
+The message or payload to send to the MQTT broker.
+The default payload is an output table.
+If there are multiple output tables, it sends each table as a separate MQTT message.
+
+{{% note %}}
+When you specify a message, the function sends the message string only (no output table).
+{{% /note %}}
+
+_**Data type:** String_
+
+### qos
+The [MQTT Quality of Service (QoS)](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901103) level.
+Values range from `[0-2]`.
+Default is `0`.
+
+_**Data type:** Integer_
+
+### clientid
+The MQTT client ID.
+
+_**Data type:** String_
+
+### username
+The username to send to the MQTT broker.
+Username is only required if the broker requires authentication.
+If you provide a username, you must provide a [password](#password).
+
+_**Data type:** String_
+
+### password
+The password to send to the MQTT broker.
+Password is only required if the broker requires authentication.
+If you provide a password, you must provide a [username](#username).
+
+_**Data type:** String_
+
+### name
+_(Optional)_ The name for the MQTT message.
+
+_**Data type:** String_
+
+### timeout
+The MQTT connection timeout.
+Default is `1s`.
+
+_**Data type:** Duration_
+
+### timeColumn
+The column to use as time values in the output line protocol.
+Default is `"_time"`.
+
+_**Data type:** String_
+
+### tagColumns
+The columns to use as tag sets in the output line protocol.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+### valueColumns
+The columns to use as field values in the output line protocol.
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+
+### Send data to an MQTT endpoint
+```js
+import "experimental/mqtt"
+
+from(bucket: "example-bucket")
+ |> range(start: -5m)
+ |> filter(fn: (r) => r._measurement == "airSensor")
+ |> mqtt.to(
+ broker: "tcp://localhost:8883",
+ topic: "air-sensors",
+ clientid: "sensor-12a4",
+ tagColumns: ["sensorID"],
+ valueColumns: ["_value"]
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/experimental/objectkeys.md b/content/v2.0/reference/flux/stdlib/experimental/objectkeys.md
new file mode 100644
index 000000000..f26a1cd9d
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/objectkeys.md
@@ -0,0 +1,53 @@
+---
+title: experimental.objectKeys() function
+description: >
+ The `experimental.objectKeys()` function returns an array of keys in a specified object.
+menu:
+ v2_0_ref:
+ name: experimental.objectKeys
+ parent: Experimental
+weight: 201
+---
+
+The `experimental.objectKeys()` function returns an array of keys in a specified object.
+
+_**Function type:** Transformation_
+
+{{% warn %}}
+The `experimental.objectKeys()` function is subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.objectKeys(
+ o: {key1: "value1", key2: "value2"}
+)
+
+// Returns [key1, key2]
+```
+
+## Parameters
+
+### o
+The object to return keys from.
+
+_**Data type:** Object_
+
+## Examples
+
+### Return all keys in an object
+```js
+import "experimental"
+
+user = {
+ firstName: "John",
+ lastName: "Doe",
+ age: 42
+}
+
+experimental.objectKeys(o: user)
+
+// Returns [firstName, lastName, age]
+```
diff --git a/content/v2.0/reference/flux/stdlib/experimental/prometheus/_index.md b/content/v2.0/reference/flux/stdlib/experimental/prometheus/_index.md
new file mode 100644
index 000000000..b4f23b76f
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/prometheus/_index.md
@@ -0,0 +1,29 @@
+---
+title: Flux Prometheus package
+list_title: Prometheus package
+description: >
+ The Flux Prometheus package provides functions for working with Prometheus-formatted metrics.
+ Import the `experimental/prometheus` package.
+menu:
+ v2_0_ref:
+ name: Prometheus
+ parent: Experimental
+weight: 201
+v2.0/tags: [functions, prometheus, package]
+---
+
+Flux Prometheus functions provide tools for working with
+[Prometheus-formatted metrics](https://prometheus.io/docs/instrumenting/exposition_formats/).
+
+{{% warn %}}
+The Prometheus package is currently experimental and subject to change at any time.
+By using this package, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+Import the `experimental/prometheus` package:
+
+```js
+import "experimental/prometheus"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/experimental/prometheus/scrape.md b/content/v2.0/reference/flux/stdlib/experimental/prometheus/scrape.md
new file mode 100644
index 000000000..c7da1bd84
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/prometheus/scrape.md
@@ -0,0 +1,52 @@
+---
+title: prometheus.scrape() function
+description: >
+ The `prometheus.scrape()` function retrieves Prometheus-formatted metrics
+ from a specified URL.
+menu:
+ v2_0_ref:
+ name: prometheus.scrape
+ parent: Prometheus
+weight: 301
+related:
+ - /v2.0/write-data/scrape-data/scrapable-endpoints/
+---
+
+The `prometheus.scrape()` function retrieves [Prometheus-formatted metrics](https://prometheus.io/docs/instrumenting/exposition_formats/)
+from a specified URL.
+The function groups metrics (including histogram and summary values) into individual tables.
+
+_**Function type:** Input_
+
+{{% warn %}}
+The `prometheus.scrape()` function is currently experimental and subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental/prometheus"
+
+prometheus.scrape(
+ url: "http://localhost:9999/metrics"
+)
+```
+
+## Parameters
+
+### url
+The URL to scrape Prometheus-formatted metrics from.
+
+_**Data type:** String_
+
+## Examples
+
+### Scrape Prometheus metrics and write them to InfluxDB
+```js
+import "experimental/prometheus"
+
+prometheus.scrape(url: "https://example-url.com/metrics")
+ |> to(
+ org: "example-org",
+ bucket: "example-bucket"
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/experimental/set.md b/content/v2.0/reference/flux/stdlib/experimental/set.md
new file mode 100644
index 000000000..d32db0c5b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/set.md
@@ -0,0 +1,75 @@
+---
+title: experimental.set() function
+description: >
+ The `experimental.set()` function sets multiple static column values on all records.
+menu:
+ v2_0_ref:
+ name: experimental.set
+ parent: Experimental
+weight: 201
+related:
+ - /v2.0/reference/flux/stdlib/built-in/transformations/set/
+---
+
+The `experimental.set()` function sets multiple static column values on all records.
+If a column already exists, the function updates the existing value.
+If a column does not exist, the function adds it with the specified value.
+
+_Once sufficiently vetted, `experimental.set()` will replace the existing
+[`set()` function](/v2.0/reference/flux/stdlib/built-in/transformations/set/)._
+
+_**Function type:** Transformation_
+
+{{% warn %}}
+The `experimental.set()` function is subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.set(
+ o: {column1: "value1", column2: "value2"}
+)
+```
+
+## Parameters
+
+### o
+An object that defines the columns and values to set.
+The key of each key-value pair defines the column name.
+The value of each key-value pair defines the column value.
+
+_**Data type:** Object_
+
+## Examples
+
+### Set values for multiple columns
+
+##### Example input table
+| _time | _field | _value |
+|:----- |:------ | ------:|
+| 2019-09-16T12:00:00Z | temp | 71.2 |
+| 2019-09-17T12:00:00Z | temp | 68.4 |
+| 2019-09-18T12:00:00Z | temp | 70.8 |
+
+##### Example query
+```js
+import "experimental"
+
+data
+ |> experimental.set(
+ o: {
+ _field: "temperature",
+ unit: "°F",
+ location: "San Francisco"
+ }
+ )
+```
+
+##### Example output table
+| _time | _field | _value | unit | location |
+|:----- |:------ | ------:|:----:| -------- |
+| 2019-09-16T12:00:00Z | temperature | 71.2 | °F | San Francisco |
+| 2019-09-17T12:00:00Z | temperature | 68.4 | °F | San Francisco |
+| 2019-09-18T12:00:00Z | temperature | 70.8 | °F | San Francisco |
diff --git a/content/v2.0/reference/flux/stdlib/experimental/subduration.md b/content/v2.0/reference/flux/stdlib/experimental/subduration.md
new file mode 100644
index 000000000..f715f6a77
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/subduration.md
@@ -0,0 +1,61 @@
+---
+title: experimental.subDuration() function
+description: >
+ The `experimental.subDuration()` function subtracts a duration from a time value and
+  returns the resulting time value.
+menu:
+ v2_0_ref:
+ name: experimental.subDuration
+ parent: Experimental
+weight: 201
+related:
+ - /v2.0/reference/flux/stdlib/experimental/addduration/
+---
+
+The `experimental.subDuration()` function subtracts a duration from a time value and
+returns the resulting time value.
+
+_**Function type:** Transformation_
+
+{{% warn %}}
+The `experimental.subDuration()` function is subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+
+This specific function will be removed once duration vectors are implemented.
+See [influxdata/flux#413](https://github.com/influxdata/flux/issues/413).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.subDuration(
+ d: 12h,
+ from: now(),
+)
+```
+
+## Parameters
+
+### d
+The duration to subtract.
+
+_**Data type:** Duration_
+
+### from
+The time to subtract the [duration](#d) from.
+
+_**Data type:** Time_
+
+## Examples
+
+### Subtract six hours from a timestamp
+```js
+import "experimental"
+
+experimental.subDuration(
+ d: 6h,
+ from: 2019-09-16T12:00:00Z,
+)
+
+// Returns 2019-09-16T06:00:00.000000000Z
+```
diff --git a/content/v2.0/reference/flux/stdlib/experimental/to.md b/content/v2.0/reference/flux/stdlib/experimental/to.md
new file mode 100644
index 000000000..6d39f2d4f
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/experimental/to.md
@@ -0,0 +1,110 @@
+---
+title: experimental.to() function
+description: >
+ The `experimental.to()` function writes data to an InfluxDB v2.0 bucket.
+ The function structures data differently than the built-in `to()` function.
+menu:
+ v2_0_ref:
+ name: experimental.to
+ parent: Experimental
+weight: 201
+related:
+ - /v2.0/reference/flux/stdlib/built-in/outputs/to/
+---
+
+The `experimental.to()` function writes data to an InfluxDB v2.0 bucket, but in
+a [different structure](#expected-data-structure) than the
+[built-in `to()` function](/v2.0/reference/flux/stdlib/built-in/outputs/to/).
+
+_**Function type:** Output_
+
+{{% warn %}}
+The `experimental.to()` function is subject to change at any time.
+By using this function, you accept the [risks of experimental functions](/v2.0/reference/flux/stdlib/experimental/#use-experimental-functions-at-your-own-risk).
+{{% /warn %}}
+
+```js
+import "experimental"
+
+experimental.to(
+ bucket: "my-bucket",
+ org: "my-org"
+)
+
+// OR
+
+experimental.to(
+ bucketID: "1234567890",
+ orgID: "0987654321"
+)
+```
+
+### Expected data structure
+
+#### Data structure expected by built-in to()
+The built-in `to()` function requires `_time`, `_measurement`, `_field`, and `_value` columns.
+The `_field` column stores the **field key** and the `_value` column stores the **field value**.
+
+| _time | _measurement | _field | _value |
+| ----- | ------------ | ------ | ------ |
+| timestamp | measurement-name | field key | field value |
+
+#### Data structure expected by experimental to()
+`experimental.to()` requires `_time` and `_measurement` columns, but field keys
+and values are stored in single columns with the **field key** as the **column name** and
+the **field value** as the **column value**.
+
+| _time | _measurement | field_key |
+| ----- | ------------ | --------- |
+| timestamp | measurement-name | field value |
+
+If using the built-in `from()` function, use [`pivot()`](/v2.0/reference/flux/stdlib/built-in/transformations/pivot/)
+to transform data into the structure `experimental.to()` expects.
+_[See the example below](#use-pivot-to-shape-data-for-experimental-to)._
+
+## Parameters
+
+### bucket
+The bucket to write data to.
+`bucket` and `bucketID` are mutually exclusive.
+
+_**Data type:** String_
+
+### bucketID
+The ID of the bucket to write data to.
+`bucketID` and `bucket` are mutually exclusive.
+
+_**Data type:** String_
+
+### org
+The organization name of the specified [`bucket`](#bucket).
+Only required when writing to a different organization or a remote host.
+`org` and `orgID` are mutually exclusive.
+
+_**Data type:** String_
+
+### orgID
+The organization ID of the specified [`bucket`](#bucket).
+Only required when writing to a different organization or a remote host.
+`orgID` and `org` are mutually exclusive.
+
+_**Data type:** String_
+
+
+## Examples
+
+##### Use pivot() to shape data for experimental.to()
+```js
+import "experimental"
+
+from(bucket: "example-bucket")
+ |> range(start: -1h)
+ |> pivot(
+ rowKey:["_time"],
+ columnKey: ["_field"],
+ valueColumn: "_value")
+ |> experimental.to(
+ bucket: "bucket-name",
+ org: "org-name"
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/http/_index.md b/content/v2.0/reference/flux/stdlib/http/_index.md
new file mode 100644
index 000000000..7145da674
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/http/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux HTTP package
+list_title: HTTP package
+description: >
+ The Flux HTTP package provides functions for transferring data using the HTTP protocol.
+ Import the `http` package.
+aliases:
+ - /v2.0/reference/flux/functions/http/
+menu:
+ v2_0_ref:
+ name: HTTP
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, http, package]
+---
+
+The Flux HTTP package provides functions for transferring data using the HTTP protocol.
+Import the `http` package:
+
+```js
+import "http"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/http/basicauth.md b/content/v2.0/reference/flux/stdlib/http/basicauth.md
new file mode 100644
index 000000000..746f84ba2
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/http/basicauth.md
@@ -0,0 +1,58 @@
+---
+title: http.basicAuth() function
+description: >
+ The `http.basicAuth()` function returns a Base64-encoded basic authentication
+ header using a specified username and password combination.
+aliases:
+ - /v2.0/reference/flux/functions/http/basicauth/
+menu:
+ v2_0_ref:
+ name: http.basicAuth
+ parent: HTTP
+weight: 202
+---
+
+The `http.basicAuth()` function returns a Base64-encoded basic authentication
+header using a specified username and password combination.
+
+_**Function type:** Miscellaneous_
+
+```js
+import "http"
+
+http.basicAuth(
+ u: "username"
+ p: "passw0rd"
+)
+
+// Returns "Basic dXNlcm5hbWU6cGFzc3cwcmQ="
+```
+
+## Parameters
+
+### u
+The username to use in the basic authentication header.
+
+_**Data type:** String_
+
+### p
+The password to use in the basic authentication header.
+
+_**Data type:** String_
+
+## Examples
+
+##### Set a basic authentication header in an HTTP POST request
+```js
+import "monitor"
+import "http"
+
+username = "myawesomeuser"
+password = "mySupErSecRetPasSW0rD"
+
+http.post(
+ url: "http://myawesomesite.com/api/",
+ headers: {Authorization: http.basicAuth(u:username, p:password)},
+ data: bytes(v: "something I want to send.")
+)
+```
diff --git a/content/v2.0/reference/flux/stdlib/http/endpoint.md b/content/v2.0/reference/flux/stdlib/http/endpoint.md
new file mode 100644
index 000000000..57a2f4545
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/http/endpoint.md
@@ -0,0 +1,50 @@
+---
+title: http.endpoint() function
+description: >
+ The `http.endpoint()` function sends output data to an HTTP URL using the POST request method.
+aliases:
+ - /v2.0/reference/flux/functions/http/endpoint/
+menu:
+ v2_0_ref:
+ name: http.endpoint
+ parent: HTTP
+weight: 202
+v2.0/tags: [endpoints]
+---
+
+The `http.endpoint()` function sends output data to an HTTP URL using the POST request method.
+
+_**Function type:** Output_
+
+```js
+import "http"
+
+http.endpoint(
+ url: "http://localhost:1234/"
+)
+```
+
+## Parameters
+
+### url
+The URL to POST to.
+
+_**Data type:** String_
+
+### mapFn
+A function that builds the object used to generate the POST request.
+
+{{% note %}}
+_You should rarely need to override the default `mapFn` parameter.
+To see the default `mapFn` value or for insight into possible overrides, view the
+[`http.endpoint()` source code](https://github.com/influxdata/flux/blob/master/stdlib/http/http.flux)._
+{{% /note %}}
+
+_**Data type:** Function_
+
+The returned object must include the following fields:
+
+- `headers`
+- `data`
+
+_For more information, see [`http.post()`](/v2.0/reference/flux/stdlib/http/post/)_
diff --git a/content/v2.0/reference/flux/stdlib/http/post.md b/content/v2.0/reference/flux/stdlib/http/post.md
new file mode 100644
index 000000000..19c06f08a
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/http/post.md
@@ -0,0 +1,67 @@
+---
+title: http.post() function
+description: >
+ The `http.post()` function submits an HTTP POST request to the specified URL with headers and data.
+ The HTTP status code is returned.
+aliases:
+ - /v2.0/reference/flux/functions/http/post/
+menu:
+ v2_0_ref:
+ name: http.post
+ parent: HTTP
+weight: 202
+---
+
+The `http.post()` function submits an HTTP POST request to the specified URL with
+headers and data and returns the HTTP status code.
+
+_**Function type:** Output_
+
+```js
+import "http"
+
+http.post(
+ url: "http://localhost:9999/",
+ headers: {x:"a", y:"b", z:"c"},
+ data: bytes(v: "body")
+)
+```
+
+## Parameters
+
+### url
+The URL to POST to.
+
+_**Data type:** String_
+
+### headers
+Headers to include with the POST request.
+
+_**Data type:** Object_
+
+### data
+The data body to include with the POST request.
+
+_**Data type:** Bytes_
+
+## Examples
+
+##### Send the last reported status to a URL
+```js
+import "json"
+import "http"
+
+lastReported =
+ from(bucket: "example-bucket")
+ |> range(start: -1m)
+ |> filter(fn: (r) => r._measurement == "statuses")
+ |> last()
+ |> tableFind(fn: (key) => exists key._level)
+ |> getColumn(column: "_level")
+
+http.post(
+ url: "http://myawsomeurl.com/api/notify",
+ headers: {Authorization: "Bearer mySuPerSecRetTokEn"},
+ data: bytes(v: lastReported[0])
+)
+```
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/_index.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/_index.md
similarity index 86%
rename from content/v2.0/reference/flux/functions/influxdb-v1/_index.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/_index.md
index ba5c03b8d..325f1e8d0 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/_index.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/_index.md
@@ -5,11 +5,13 @@ description: >
The Flux InfluxDB v1 package provides functions for managing data from an InfluxDB v1.x
database or structured using the InfluxDB v1 data structure.
Import the `influxdata/influxdb/v1` package.
+aliases:
+ - /v2.0/reference/flux/functions/influxdb-v1/
menu:
v2_0_ref:
name: InfluxDB v1
- parent: Flux packages and functions
-weight: 203
+ parent: Flux standard library
+weight: 202
v2.0/tags: [functions, influxdb-v1, package]
---
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/fieldsascols.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/fieldsascols.md
similarity index 89%
rename from content/v2.0/reference/flux/functions/influxdb-v1/fieldsascols.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/fieldsascols.md
index 0d7b2da64..a1596f8a6 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/fieldsascols.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/fieldsascols.md
@@ -4,6 +4,7 @@ description: The v1.fieldsAsCols() function is pivots a table and automatically
aliases:
- /v2.0/reference/flux/functions/inputs/fromrows
- /v2.0/reference/flux/functions/transformations/influxfieldsascols
+ - /v2.0/reference/flux/functions/influxdb-v1/fieldsascols/
menu:
v2_0_ref:
name: v1.fieldsAsCols
@@ -45,4 +46,4 @@ fieldsAsCols = (tables=<-) =>
```
_**Used functions:**
-[pivot()](/v2.0/reference/flux/functions/built-in/transformations/pivot)_
+[pivot()](/v2.0/reference/flux/stdlib/built-in/transformations/pivot)_
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/measurements.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/measurements.md
similarity index 84%
rename from content/v2.0/reference/flux/functions/influxdb-v1/measurements.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/measurements.md
index a13e77ae0..8ffd2b9bb 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/measurements.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/measurements.md
@@ -1,6 +1,8 @@
---
title: v1.measurements() function
description: The v1.measurements() function returns a list of measurements in a specific bucket.
+aliases:
+ - /v2.0/reference/flux/functions/influxdb-v1/measurements/
menu:
v2_0_ref:
name: v1.measurements
@@ -32,4 +34,4 @@ measurements = (bucket) =>
```
_**Used functions:**
-[tagValues()](/v2.0/reference/flux/functions/influxdb-v1/tagvalues)_
+[tagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues)_
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/measurementtagkeys.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagkeys.md
similarity index 87%
rename from content/v2.0/reference/flux/functions/influxdb-v1/measurementtagkeys.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagkeys.md
index dc1382b25..460908f39 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/measurementtagkeys.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagkeys.md
@@ -1,6 +1,8 @@
---
title: v1.measurementTagKeys() function
description: The v1.measurementTagKeys() function returns a list of tag keys for a specific measurement.
+aliases:
+ - /v2.0/reference/flux/functions/influxdb-v1/measurementtagkeys/
menu:
v2_0_ref:
name: v1.measurementTagKeys
@@ -42,4 +44,4 @@ measurementTagKeys = (bucket, measurement) =>
```
_**Used functions:**
-[tagKeys()](/v2.0/reference/flux/functions/influxdb-v1/tagkeys)_
+[tagKeys()](/v2.0/reference/flux/stdlib/influxdb-v1/tagkeys)_
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagvalues.md
similarity index 88%
rename from content/v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagvalues.md
index 979f07b9e..89a2281c7 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagvalues.md
@@ -1,6 +1,8 @@
---
title: v1.measurementTagValues() function
description: The v1.measurementTagValues() function returns a list of tag values for a specific measurement.
+aliases:
+ - /v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues/
menu:
v2_0_ref:
name: v1.measurementTagValues
@@ -53,4 +55,4 @@ measurementTagValues = (bucket, measurement, tag) =>
```
_**Used functions:**
-[tagValues()](/v2.0/reference/flux/functions/influxdb-v1/tagvalues)_
+[tagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues)_
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/tagkeys.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/tagkeys.md
similarity index 73%
rename from content/v2.0/reference/flux/functions/influxdb-v1/tagkeys.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/tagkeys.md
index 7a65ac062..63d8a370d 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/tagkeys.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/tagkeys.md
@@ -1,6 +1,8 @@
---
title: v1.tagKeys() function
description: The v1.tagKeys() function returns a list of tag keys for all series that match the predicate.
+aliases:
+ - /v2.0/reference/flux/functions/influxdb-v1/tagkeys/
menu:
v2_0_ref:
name: v1.tagKeys
@@ -65,9 +67,9 @@ tagKeys = (bucket, predicate=(r) => true, start=-30d) =>
```
_**Used functions:**
-[from](/v2.0/reference/flux/functions/built-in/inputs/from/),
-[range](/v2.0/reference/flux/functions/built-in/transformations/range/),
-[filter](/v2.0/reference/flux/functions/built-in/transformations/filter/),
-[keys](/v2.0/reference/flux/functions/built-in/transformations/keys/),
-[keep](/v2.0/reference/flux/functions/built-in/transformations/keep/),
-[distinct](/v2.0/reference/flux/functions/built-in/transformations/selectors/distinct/)_
+[from](/v2.0/reference/flux/stdlib/built-in/inputs/from/),
+[range](/v2.0/reference/flux/stdlib/built-in/transformations/range/),
+[filter](/v2.0/reference/flux/stdlib/built-in/transformations/filter/),
+[keys](/v2.0/reference/flux/stdlib/built-in/transformations/keys/),
+[keep](/v2.0/reference/flux/stdlib/built-in/transformations/keep/),
+[distinct](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/distinct/)_
diff --git a/content/v2.0/reference/flux/functions/influxdb-v1/tagvalues.md b/content/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues.md
similarity index 74%
rename from content/v2.0/reference/flux/functions/influxdb-v1/tagvalues.md
rename to content/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues.md
index 7db47cb88..51266ccce 100644
--- a/content/v2.0/reference/flux/functions/influxdb-v1/tagvalues.md
+++ b/content/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues.md
@@ -1,6 +1,8 @@
---
title: v1.tagValues() function
description: The `v1.tagValues()` function returns a list unique values for a given tag.
+aliases:
+ - /v2.0/reference/flux/functions/influxdb-v1/tagvalues/
menu:
v2_0_ref:
name: v1.tagValues
@@ -73,9 +75,9 @@ tagValues = (bucket, tag, predicate=(r) => true, start=-30d) =>
```
_**Used functions:**
-[from](/v2.0/reference/flux/functions/built-in/inputs/from/),
-[range](/v2.0/reference/flux/functions/built-in/transformations/range/),
-[filter](/v2.0/reference/flux/functions/built-in/transformations/filter/),
-[group](/v2.0/reference/flux/functions/built-in/transformations/group/),
-[distinct](/v2.0/reference/flux/functions/built-in/transformations/selectors/distinct/),
-[keep](/v2.0/reference/flux/functions/built-in/transformations/keep/)_
+[from](/v2.0/reference/flux/stdlib/built-in/inputs/from/),
+[range](/v2.0/reference/flux/stdlib/built-in/transformations/range/),
+[filter](/v2.0/reference/flux/stdlib/built-in/transformations/filter/),
+[group](/v2.0/reference/flux/stdlib/built-in/transformations/group/),
+[distinct](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/distinct/),
+[keep](/v2.0/reference/flux/stdlib/built-in/transformations/keep/)_
diff --git a/content/v2.0/reference/flux/stdlib/json/_index.md b/content/v2.0/reference/flux/stdlib/json/_index.md
new file mode 100644
index 000000000..00d4e266c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/json/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux JSON package
+list_title: JSON package
+description: >
+ The Flux JSON package provides functions for working with JSON.
+ Import the `json` package.
+aliases:
+ - /v2.0/reference/flux/functions/json/
+menu:
+ v2_0_ref:
+ name: JSON
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, json, package]
+---
+
+JSON Flux functions provide tools for working with JSON.
+Import the `json` package:
+
+```js
+import "json"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/json/encode.md b/content/v2.0/reference/flux/stdlib/json/encode.md
new file mode 100644
index 000000000..0c6f42cd9
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/json/encode.md
@@ -0,0 +1,49 @@
+---
+title: json.encode() function
+description: The `json.encode()` function converts a value into JSON bytes.
+aliases:
+ - /v2.0/reference/flux/functions/json/encode/
+menu:
+ v2_0_ref:
+ name: json.encode
+ parent: JSON
+weight: 202
+---
+
+The `json.encode()` function converts a value into JSON bytes.
+
+_**Function type:** Type conversion_
+
+```js
+import "json"
+
+json.encode(v: "some value")
+```
+
+This function encodes [Flux types](/v2.0/reference/flux/language/types/) as follows:
+
+- `time` values in [RFC3339](https://tools.ietf.org/html/rfc3339) format
+- `duration` values in number of milliseconds since the epoch
+- `regexp` values as their string representation
+- `bytes` values as base64-encoded strings
+- `function` values are not encoded and produce an error
+
+## Parameters
+
+### v
+The value to convert.
+
+_**Data type:** Boolean | Duration | Float | Integer | String | Time | UInteger_
+
+## Examples
+
+### Encode all values in a column as JSON bytes
+```js
+import "json"
+
+from(bucket: "example-bucket")
+ |> range(start: -1h)
+ |> map(fn: (r) => ({
+ r with _value: json.encode(v: r._value)
+ }))
+```
diff --git a/content/v2.0/reference/flux/functions/math/_index.md b/content/v2.0/reference/flux/stdlib/math/_index.md
similarity index 95%
rename from content/v2.0/reference/flux/functions/math/_index.md
rename to content/v2.0/reference/flux/stdlib/math/_index.md
index bb0ef8f25..1ad8fa2fb 100644
--- a/content/v2.0/reference/flux/functions/math/_index.md
+++ b/content/v2.0/reference/flux/stdlib/math/_index.md
@@ -4,10 +4,12 @@ list_title: Math package
description: >
The Flux math package provides basic constants and mathematical functions.
Import the `math` package.
+aliases:
+ - /v2.0/reference/flux/functions/math/
menu:
v2_0_ref:
name: Math
- parent: Flux packages and functions
+ parent: Flux standard library
weight: 202
v2.0/tags: [math, functions]
---
diff --git a/content/v2.0/reference/flux/functions/math/abs.md b/content/v2.0/reference/flux/stdlib/math/abs.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/abs.md
rename to content/v2.0/reference/flux/stdlib/math/abs.md
index 8db0c56b7..ac946c108 100644
--- a/content/v2.0/reference/flux/functions/math/abs.md
+++ b/content/v2.0/reference/flux/stdlib/math/abs.md
@@ -1,6 +1,8 @@
---
title: math.abs() function
description: The math.abs() function returns the absolute value of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/abs/
menu:
v2_0_ref:
name: math.abs
diff --git a/content/v2.0/reference/flux/functions/math/acos.md b/content/v2.0/reference/flux/stdlib/math/acos.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/acos.md
rename to content/v2.0/reference/flux/stdlib/math/acos.md
index 5b6b88fd8..528edc827 100644
--- a/content/v2.0/reference/flux/functions/math/acos.md
+++ b/content/v2.0/reference/flux/stdlib/math/acos.md
@@ -1,6 +1,8 @@
---
title: math.acos() function
description: The math.acos() function returns the arccosine of `x` in radians.
+aliases:
+ - /v2.0/reference/flux/functions/math/acos/
menu:
v2_0_ref:
name: math.acos
diff --git a/content/v2.0/reference/flux/functions/math/acosh.md b/content/v2.0/reference/flux/stdlib/math/acosh.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/acosh.md
rename to content/v2.0/reference/flux/stdlib/math/acosh.md
index 95cead337..80ccad2c2 100644
--- a/content/v2.0/reference/flux/functions/math/acosh.md
+++ b/content/v2.0/reference/flux/stdlib/math/acosh.md
@@ -1,6 +1,8 @@
---
title: math.acosh() function
description: The math.acosh() function returns the inverse hyperbolic cosine of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/acosh/
menu:
v2_0_ref:
name: math.acosh
diff --git a/content/v2.0/reference/flux/functions/math/asin.md b/content/v2.0/reference/flux/stdlib/math/asin.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/asin.md
rename to content/v2.0/reference/flux/stdlib/math/asin.md
index 614e12174..9e667f686 100644
--- a/content/v2.0/reference/flux/functions/math/asin.md
+++ b/content/v2.0/reference/flux/stdlib/math/asin.md
@@ -1,6 +1,8 @@
---
title: math.asin() function
description: The math.asin() function returns the arcsine of `x` in radians.
+aliases:
+ - /v2.0/reference/flux/functions/math/asin/
menu:
v2_0_ref:
name: math.asin
diff --git a/content/v2.0/reference/flux/functions/math/asinh.md b/content/v2.0/reference/flux/stdlib/math/asinh.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/asinh.md
rename to content/v2.0/reference/flux/stdlib/math/asinh.md
index 9800d3ad0..4ff4ca9bc 100644
--- a/content/v2.0/reference/flux/functions/math/asinh.md
+++ b/content/v2.0/reference/flux/stdlib/math/asinh.md
@@ -1,6 +1,8 @@
---
title: math.asinh() function
description: The math.asinh() function returns the inverse hyperbolic sine of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/asinh/
menu:
v2_0_ref:
name: math.asinh
diff --git a/content/v2.0/reference/flux/functions/math/atan.md b/content/v2.0/reference/flux/stdlib/math/atan.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/atan.md
rename to content/v2.0/reference/flux/stdlib/math/atan.md
index 524a3fe96..77a360dc1 100644
--- a/content/v2.0/reference/flux/functions/math/atan.md
+++ b/content/v2.0/reference/flux/stdlib/math/atan.md
@@ -1,6 +1,8 @@
---
title: math.atan() function
description: The math.atan() function returns the arctangent of `x` in radians.
+aliases:
+ - /v2.0/reference/flux/functions/math/atan/
menu:
v2_0_ref:
name: math.atan
diff --git a/content/v2.0/reference/flux/functions/math/atan2.md b/content/v2.0/reference/flux/stdlib/math/atan2.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/math/atan2.md
rename to content/v2.0/reference/flux/stdlib/math/atan2.md
index 1556dcbf5..69049aafc 100644
--- a/content/v2.0/reference/flux/functions/math/atan2.md
+++ b/content/v2.0/reference/flux/stdlib/math/atan2.md
@@ -3,6 +3,8 @@ title: math.atan2() function
description: >
The math.atan2() function returns the arc tangent of `y`/`x`, using the signs of
the parameters to determine the quadrant of the return value.
+aliases:
+ - /v2.0/reference/flux/functions/math/atan2/
menu:
v2_0_ref:
name: math.atan2
diff --git a/content/v2.0/reference/flux/functions/math/atanh.md b/content/v2.0/reference/flux/stdlib/math/atanh.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/atanh.md
rename to content/v2.0/reference/flux/stdlib/math/atanh.md
index d3a551543..ea01f337a 100644
--- a/content/v2.0/reference/flux/functions/math/atanh.md
+++ b/content/v2.0/reference/flux/stdlib/math/atanh.md
@@ -1,6 +1,8 @@
---
title: math.atanh() function
description: The math.atanh() function returns the inverse hyperbolic tangent of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/atanh/
menu:
v2_0_ref:
name: math.atanh
diff --git a/content/v2.0/reference/flux/functions/math/cbrt.md b/content/v2.0/reference/flux/stdlib/math/cbrt.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/cbrt.md
rename to content/v2.0/reference/flux/stdlib/math/cbrt.md
index 6054dacf3..f7018bb68 100644
--- a/content/v2.0/reference/flux/functions/math/cbrt.md
+++ b/content/v2.0/reference/flux/stdlib/math/cbrt.md
@@ -1,6 +1,8 @@
---
title: math.cbrt() function
description: The math.cbrt() function returns the cube root of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/cbrt/
menu:
v2_0_ref:
name: math.cbrt
diff --git a/content/v2.0/reference/flux/functions/math/ceil.md b/content/v2.0/reference/flux/stdlib/math/ceil.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/ceil.md
rename to content/v2.0/reference/flux/stdlib/math/ceil.md
index 091ffbedb..397fd53e3 100644
--- a/content/v2.0/reference/flux/functions/math/ceil.md
+++ b/content/v2.0/reference/flux/stdlib/math/ceil.md
@@ -1,6 +1,8 @@
---
title: math.ceil() function
description: The math.ceil() function returns the least integer value greater than or equal to `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/ceil/
menu:
v2_0_ref:
name: math.ceil
diff --git a/content/v2.0/reference/flux/functions/math/copysign.md b/content/v2.0/reference/flux/stdlib/math/copysign.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/copysign.md
rename to content/v2.0/reference/flux/stdlib/math/copysign.md
index 26466573f..ecc4b0267 100644
--- a/content/v2.0/reference/flux/functions/math/copysign.md
+++ b/content/v2.0/reference/flux/stdlib/math/copysign.md
@@ -1,6 +1,8 @@
---
title: math.copysign() function
description: The math.copysign() function returns a value with the magnitude of `x` and the sign of `y`.
+aliases:
+ - /v2.0/reference/flux/functions/math/copysign/
menu:
v2_0_ref:
name: math.copysign
diff --git a/content/v2.0/reference/flux/functions/math/cos.md b/content/v2.0/reference/flux/stdlib/math/cos.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/cos.md
rename to content/v2.0/reference/flux/stdlib/math/cos.md
index 7c1b6bd2f..f06e07a50 100644
--- a/content/v2.0/reference/flux/functions/math/cos.md
+++ b/content/v2.0/reference/flux/stdlib/math/cos.md
@@ -1,6 +1,8 @@
---
title: math.cos() function
description: The math.cos() function returns the cosine of the radian argument `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/cos/
menu:
v2_0_ref:
name: math.cos
diff --git a/content/v2.0/reference/flux/functions/math/cosh.md b/content/v2.0/reference/flux/stdlib/math/cosh.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/cosh.md
rename to content/v2.0/reference/flux/stdlib/math/cosh.md
index 22fff8771..a23477022 100644
--- a/content/v2.0/reference/flux/functions/math/cosh.md
+++ b/content/v2.0/reference/flux/stdlib/math/cosh.md
@@ -1,6 +1,8 @@
---
title: math.cosh() function
description: The math.cosh() function returns the hyperbolic cosine of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/cosh/
menu:
v2_0_ref:
name: math.cosh
diff --git a/content/v2.0/reference/flux/functions/math/dim.md b/content/v2.0/reference/flux/stdlib/math/dim.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/dim.md
rename to content/v2.0/reference/flux/stdlib/math/dim.md
index b52a3c8b2..135bb4356 100644
--- a/content/v2.0/reference/flux/functions/math/dim.md
+++ b/content/v2.0/reference/flux/stdlib/math/dim.md
@@ -1,6 +1,8 @@
---
title: math.dim() function
description: The math.dim() function returns the maximum of `x`-`y` or 0.
+aliases:
+ - /v2.0/reference/flux/functions/math/dim/
menu:
v2_0_ref:
name: math.dim
diff --git a/content/v2.0/reference/flux/functions/math/erf.md b/content/v2.0/reference/flux/stdlib/math/erf.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/erf.md
rename to content/v2.0/reference/flux/stdlib/math/erf.md
index ebc8cca16..18c23441e 100644
--- a/content/v2.0/reference/flux/functions/math/erf.md
+++ b/content/v2.0/reference/flux/stdlib/math/erf.md
@@ -1,6 +1,8 @@
---
title: math.erf() function
description: The math.erf() function returns the error function of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/erf/
menu:
v2_0_ref:
name: math.erf
diff --git a/content/v2.0/reference/flux/functions/math/erfc.md b/content/v2.0/reference/flux/stdlib/math/erfc.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/erfc.md
rename to content/v2.0/reference/flux/stdlib/math/erfc.md
index a2a7e2ac5..176ccd6bb 100644
--- a/content/v2.0/reference/flux/functions/math/erfc.md
+++ b/content/v2.0/reference/flux/stdlib/math/erfc.md
@@ -1,6 +1,8 @@
---
title: math.erfc() function
description: The math.erfc() function returns the complementary error function of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/erfc/
menu:
v2_0_ref:
name: math.erfc
diff --git a/content/v2.0/reference/flux/functions/math/erfcinv.md b/content/v2.0/reference/flux/stdlib/math/erfcinv.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/erfcinv.md
rename to content/v2.0/reference/flux/stdlib/math/erfcinv.md
index c5f2c473f..4a1ba079e 100644
--- a/content/v2.0/reference/flux/functions/math/erfcinv.md
+++ b/content/v2.0/reference/flux/stdlib/math/erfcinv.md
@@ -1,6 +1,8 @@
---
title: math.erfcinv() function
description: The math.erfcinv() function returns the inverse of `math.erfc()`.
+aliases:
+ - /v2.0/reference/flux/functions/math/erfcinv/
menu:
v2_0_ref:
name: math.erfcinv
diff --git a/content/v2.0/reference/flux/functions/math/erfinv.md b/content/v2.0/reference/flux/stdlib/math/erfinv.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/erfinv.md
rename to content/v2.0/reference/flux/stdlib/math/erfinv.md
index 2c4511397..1abdc350c 100644
--- a/content/v2.0/reference/flux/functions/math/erfinv.md
+++ b/content/v2.0/reference/flux/stdlib/math/erfinv.md
@@ -1,6 +1,8 @@
---
title: math.erfinv() function
description: The math.erfinv() function returns the inverse error function of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/erfinv/
menu:
v2_0_ref:
name: math.erfinv
diff --git a/content/v2.0/reference/flux/functions/math/exp.md b/content/v2.0/reference/flux/stdlib/math/exp.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/exp.md
rename to content/v2.0/reference/flux/stdlib/math/exp.md
index 14228533a..5d530b556 100644
--- a/content/v2.0/reference/flux/functions/math/exp.md
+++ b/content/v2.0/reference/flux/stdlib/math/exp.md
@@ -1,6 +1,8 @@
---
title: math.exp() function
description: The math.exp() function returns `e**x`, the base-e exponential of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/exp/
menu:
v2_0_ref:
name: math.exp
diff --git a/content/v2.0/reference/flux/functions/math/exp2.md b/content/v2.0/reference/flux/stdlib/math/exp2.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/exp2.md
rename to content/v2.0/reference/flux/stdlib/math/exp2.md
index d66b572d0..599bf86ff 100644
--- a/content/v2.0/reference/flux/functions/math/exp2.md
+++ b/content/v2.0/reference/flux/stdlib/math/exp2.md
@@ -1,6 +1,8 @@
---
title: math.exp2() function
description: The math.exp2() function returns `2**x`, the base-2 exponential of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/exp2/
menu:
v2_0_ref:
name: math.exp2
diff --git a/content/v2.0/reference/flux/functions/math/expm1.md b/content/v2.0/reference/flux/stdlib/math/expm1.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/expm1.md
rename to content/v2.0/reference/flux/stdlib/math/expm1.md
index 953926420..02df8ce1a 100644
--- a/content/v2.0/reference/flux/functions/math/expm1.md
+++ b/content/v2.0/reference/flux/stdlib/math/expm1.md
@@ -3,6 +3,8 @@ title: math.expm1() function
description: >
The math.expm1() function returns `e**x - 1`, the base-e exponential of `x` minus 1.
It is more accurate than `math.exp(x:x) - 1` when `x` is near zero.
+aliases:
+ - /v2.0/reference/flux/functions/math/expm1/
menu:
v2_0_ref:
name: math.expm1
diff --git a/content/v2.0/reference/flux/functions/math/float64bits.md b/content/v2.0/reference/flux/stdlib/math/float64bits.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/float64bits.md
rename to content/v2.0/reference/flux/stdlib/math/float64bits.md
index 95a16dcb9..20d233052 100644
--- a/content/v2.0/reference/flux/functions/math/float64bits.md
+++ b/content/v2.0/reference/flux/stdlib/math/float64bits.md
@@ -1,6 +1,8 @@
---
title: math.float64bits() function
description: The math.float64bits() function returns the IEEE 754 binary representation of `f`, with the sign bit of `f` and the result in the same bit position.
+aliases:
+ - /v2.0/reference/flux/functions/math/float64bits/
menu:
v2_0_ref:
name: math.float64bits
diff --git a/content/v2.0/reference/flux/functions/math/floor.md b/content/v2.0/reference/flux/stdlib/math/floor.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/floor.md
rename to content/v2.0/reference/flux/stdlib/math/floor.md
index bea8631d9..4e87ee103 100644
--- a/content/v2.0/reference/flux/functions/math/floor.md
+++ b/content/v2.0/reference/flux/stdlib/math/floor.md
@@ -1,6 +1,8 @@
---
title: math.floor() function
description: The math.floor() function returns the greatest integer value less than or equal to `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/floor/
menu:
v2_0_ref:
name: math.floor
diff --git a/content/v2.0/reference/flux/functions/math/frexp.md b/content/v2.0/reference/flux/stdlib/math/frexp.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/math/frexp.md
rename to content/v2.0/reference/flux/stdlib/math/frexp.md
index 622a1006c..6b4a043f7 100644
--- a/content/v2.0/reference/flux/functions/math/frexp.md
+++ b/content/v2.0/reference/flux/stdlib/math/frexp.md
@@ -4,6 +4,8 @@ description: >
The math.frexp() function breaks `f` into a normalized fraction and an integral power of two.
It returns `frac` and `exp` satisfying `f == frac × 2**exp`, with the absolute
value of `frac` in the interval [½, 1).
+aliases:
+ - /v2.0/reference/flux/functions/math/frexp/
menu:
v2_0_ref:
name: math.frexp
diff --git a/content/v2.0/reference/flux/functions/math/gamma.md b/content/v2.0/reference/flux/stdlib/math/gamma.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/gamma.md
rename to content/v2.0/reference/flux/stdlib/math/gamma.md
index 744d5cdc8..81fb2f936 100644
--- a/content/v2.0/reference/flux/functions/math/gamma.md
+++ b/content/v2.0/reference/flux/stdlib/math/gamma.md
@@ -1,6 +1,8 @@
---
title: math.gamma() function
description: The math.gamma() function returns the Gamma function of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/gamma/
menu:
v2_0_ref:
name: math.gamma
diff --git a/content/v2.0/reference/flux/functions/math/hypot.md b/content/v2.0/reference/flux/stdlib/math/hypot.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/hypot.md
rename to content/v2.0/reference/flux/stdlib/math/hypot.md
index c9d6af266..459f83f31 100644
--- a/content/v2.0/reference/flux/functions/math/hypot.md
+++ b/content/v2.0/reference/flux/stdlib/math/hypot.md
@@ -3,6 +3,8 @@ title: math.hypot() function
description: >
The math.hypot() function returns the square root of `p*p + q*q`,
taking care to avoid unnecessary overflow and underflow.
+aliases:
+ - /v2.0/reference/flux/functions/math/hypot/
menu:
v2_0_ref:
name: math.hypot
diff --git a/content/v2.0/reference/flux/functions/math/ilogb.md b/content/v2.0/reference/flux/stdlib/math/ilogb.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/ilogb.md
rename to content/v2.0/reference/flux/stdlib/math/ilogb.md
index 22b87b558..ec18d023c 100644
--- a/content/v2.0/reference/flux/functions/math/ilogb.md
+++ b/content/v2.0/reference/flux/stdlib/math/ilogb.md
@@ -1,6 +1,8 @@
---
title: math.ilogb() function
description: The math.ilogb() function returns the binary exponent of `x` as an integer.
+aliases:
+ - /v2.0/reference/flux/functions/math/ilogb/
menu:
v2_0_ref:
name: math.ilogb
diff --git a/content/v2.0/reference/flux/functions/math/isinf.md b/content/v2.0/reference/flux/stdlib/math/isinf.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/isinf.md
rename to content/v2.0/reference/flux/stdlib/math/isinf.md
index 900dc54cf..e1d69151b 100644
--- a/content/v2.0/reference/flux/functions/math/isinf.md
+++ b/content/v2.0/reference/flux/stdlib/math/isinf.md
@@ -1,6 +1,8 @@
---
title: math.isInf() function
description: The math.isInf() function reports whether `f` is an infinity, according to `sign`.
+aliases:
+ - /v2.0/reference/flux/functions/math/isinf/
menu:
v2_0_ref:
name: math.isInf
diff --git a/content/v2.0/reference/flux/functions/math/isnan.md b/content/v2.0/reference/flux/stdlib/math/isnan.md
similarity index 89%
rename from content/v2.0/reference/flux/functions/math/isnan.md
rename to content/v2.0/reference/flux/stdlib/math/isnan.md
index dfcf73ae1..f2736f93e 100644
--- a/content/v2.0/reference/flux/functions/math/isnan.md
+++ b/content/v2.0/reference/flux/stdlib/math/isnan.md
@@ -1,6 +1,8 @@
---
title: math.isNaN() function
description: The math.isNaN() function reports whether `f` is an IEEE 754 “not-a-number” value.
+aliases:
+ - /v2.0/reference/flux/functions/math/isnan/
menu:
v2_0_ref:
name: math.isNaN
@@ -26,8 +28,3 @@ math.isNaN(f: 12.345)
The value used in the evaluation.
_**Data type:** Float_
-
-## Special cases
-```js
-
-```
diff --git a/content/v2.0/reference/flux/functions/math/j0.md b/content/v2.0/reference/flux/stdlib/math/j0.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/j0.md
rename to content/v2.0/reference/flux/stdlib/math/j0.md
index 74338fd03..1d8faf6fe 100644
--- a/content/v2.0/reference/flux/functions/math/j0.md
+++ b/content/v2.0/reference/flux/stdlib/math/j0.md
@@ -1,6 +1,8 @@
---
title: math.j0() function
description: The math.j0() function returns the order-zero Bessel function of the first kind.
+aliases:
+ - /v2.0/reference/flux/functions/math/j0/
menu:
v2_0_ref:
name: math.j0
diff --git a/content/v2.0/reference/flux/functions/math/j1.md b/content/v2.0/reference/flux/stdlib/math/j1.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/j1.md
rename to content/v2.0/reference/flux/stdlib/math/j1.md
index 594b3c81d..0160d3915 100644
--- a/content/v2.0/reference/flux/functions/math/j1.md
+++ b/content/v2.0/reference/flux/stdlib/math/j1.md
@@ -1,6 +1,8 @@
---
title: math.j1() function
description: The math.j1() function returns the order-one Bessel function of the first kind.
+aliases:
+ - /v2.0/reference/flux/functions/math/j1/
menu:
v2_0_ref:
name: math.j1
diff --git a/content/v2.0/reference/flux/functions/math/jn.md b/content/v2.0/reference/flux/stdlib/math/jn.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/jn.md
rename to content/v2.0/reference/flux/stdlib/math/jn.md
index 80e0f6acf..2bda3bc79 100644
--- a/content/v2.0/reference/flux/functions/math/jn.md
+++ b/content/v2.0/reference/flux/stdlib/math/jn.md
@@ -1,6 +1,8 @@
---
title: math.jn() function
description: The math.jn() function returns the order-n Bessel function of the first kind.
+aliases:
+ - /v2.0/reference/flux/functions/math/jn/
menu:
v2_0_ref:
name: math.jn
diff --git a/content/v2.0/reference/flux/functions/math/ldexp.md b/content/v2.0/reference/flux/stdlib/math/ldexp.md
similarity index 88%
rename from content/v2.0/reference/flux/functions/math/ldexp.md
rename to content/v2.0/reference/flux/stdlib/math/ldexp.md
index 8ecfb26eb..9213320a7 100644
--- a/content/v2.0/reference/flux/functions/math/ldexp.md
+++ b/content/v2.0/reference/flux/stdlib/math/ldexp.md
@@ -1,6 +1,8 @@
---
title: math.ldexp() function
description: The math.ldexp() function is the inverse of `math.frexp()`. It returns `frac × 2**exp`.
+aliases:
+ - /v2.0/reference/flux/functions/math/ldexp/
menu:
v2_0_ref:
name: math.ldexp
@@ -8,7 +10,7 @@ menu:
weight: 301
---
-The `math.ldexp()` function is the inverse of [`math.frexp()`](/v2.0/reference/flux/functions/math/frexp).
+The `math.ldexp()` function is the inverse of [`math.frexp()`](/v2.0/reference/flux/stdlib/math/frexp).
It returns `frac × 2**exp`.
_**Output data type:** Float_
diff --git a/content/v2.0/reference/flux/functions/math/lgamma.md b/content/v2.0/reference/flux/stdlib/math/lgamma.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/lgamma.md
rename to content/v2.0/reference/flux/stdlib/math/lgamma.md
index 2be3b19e5..327505a88 100644
--- a/content/v2.0/reference/flux/functions/math/lgamma.md
+++ b/content/v2.0/reference/flux/stdlib/math/lgamma.md
@@ -1,6 +1,8 @@
---
title: math.lgamma() function
description: The math.lgamma() function returns the natural logarithm and sign (-1 or +1) of `math.gamma(x:x)`.
+aliases:
+ - /v2.0/reference/flux/functions/math/lgamma/
menu:
v2_0_ref:
name: math.lgamma
diff --git a/content/v2.0/reference/flux/functions/math/log.md b/content/v2.0/reference/flux/stdlib/math/log.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/log.md
rename to content/v2.0/reference/flux/stdlib/math/log.md
index 84eb17fc8..8e2672f6d 100644
--- a/content/v2.0/reference/flux/functions/math/log.md
+++ b/content/v2.0/reference/flux/stdlib/math/log.md
@@ -1,6 +1,8 @@
---
title: math.log() function
description: The math.log() function returns the natural logarithm of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/log/
menu:
v2_0_ref:
name: math.log
diff --git a/content/v2.0/reference/flux/functions/math/log10.md b/content/v2.0/reference/flux/stdlib/math/log10.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/log10.md
rename to content/v2.0/reference/flux/stdlib/math/log10.md
index 1b3a7ed72..7f6ab2843 100644
--- a/content/v2.0/reference/flux/functions/math/log10.md
+++ b/content/v2.0/reference/flux/stdlib/math/log10.md
@@ -1,6 +1,8 @@
---
title: math.log10() function
description: The math.log10() function returns the decimal logarithm of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/log10/
menu:
v2_0_ref:
name: math.log10
diff --git a/content/v2.0/reference/flux/functions/math/log1p.md b/content/v2.0/reference/flux/stdlib/math/log1p.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/log1p.md
rename to content/v2.0/reference/flux/stdlib/math/log1p.md
index 463ccc078..a5ced2f8b 100644
--- a/content/v2.0/reference/flux/functions/math/log1p.md
+++ b/content/v2.0/reference/flux/stdlib/math/log1p.md
@@ -3,6 +3,8 @@ title: math.log1p() function
description: >
The math.log1p() function returns the natural logarithm of 1 plus its argument `x`.
It is more accurate than `math.log(x: 1 + x)` when `x` is near zero.
+aliases:
+ - /v2.0/reference/flux/functions/math/log1p/
menu:
v2_0_ref:
name: math.log1p
diff --git a/content/v2.0/reference/flux/functions/math/log2.md b/content/v2.0/reference/flux/stdlib/math/log2.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/log2.md
rename to content/v2.0/reference/flux/stdlib/math/log2.md
index a44fd54f0..f61944d2f 100644
--- a/content/v2.0/reference/flux/functions/math/log2.md
+++ b/content/v2.0/reference/flux/stdlib/math/log2.md
@@ -1,6 +1,8 @@
---
title: math.log2() function
description: The math.log2() function returns the binary logarithm of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/log2/
menu:
v2_0_ref:
name: math.log2
diff --git a/content/v2.0/reference/flux/functions/math/logb.md b/content/v2.0/reference/flux/stdlib/math/logb.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/logb.md
rename to content/v2.0/reference/flux/stdlib/math/logb.md
index cd5049a70..313c116be 100644
--- a/content/v2.0/reference/flux/functions/math/logb.md
+++ b/content/v2.0/reference/flux/stdlib/math/logb.md
@@ -1,6 +1,8 @@
---
title: math.logb() function
description: The math.logb() function returns the binary exponent of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/logb/
menu:
v2_0_ref:
name: math.logb
diff --git a/content/v2.0/reference/flux/stdlib/math/minf.md b/content/v2.0/reference/flux/stdlib/math/minf.md
new file mode 100644
index 000000000..e419a6a8f
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/math/minf.md
@@ -0,0 +1,31 @@
+---
+title: math.mInf() function
+description: The math.mInf() function returns positive infinity if `sign >= 0`, negative infinity if `sign < 0`.
+aliases:
+ - /v2.0/reference/flux/functions/math/m_inf/
+ - /v2.0/reference/flux/stdlib/math/m_inf/
+menu:
+ v2_0_ref:
+ name: math.mInf
+ parent: Math
+weight: 301
+---
+
+The `math.mInf()` function returns positive infinity if `sign >= 0`, negative infinity if `sign < 0`.
+
+_**Output data type:** Float_
+
+```js
+import "math"
+
+math.mInf(sign: 1)
+
+// Returns +Inf
+```
+
+## Parameters
+
+### sign
+The sign value used in the operation.
+
+_**Data type:** Integer_
diff --git a/content/v2.0/reference/flux/stdlib/math/mmax.md b/content/v2.0/reference/flux/stdlib/math/mmax.md
new file mode 100644
index 000000000..623d31f69
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/math/mmax.md
@@ -0,0 +1,47 @@
+---
+title: math.mMax() function
+description: The math.mMax() function returns the larger of `x` or `y`.
+aliases:
+ - /v2.0/reference/flux/functions/math/m_max/
+ - /v2.0/reference/flux/stdlib/math/m_max/
+menu:
+ v2_0_ref:
+ name: math.mMax
+ parent: Math
+weight: 301
+---
+
+The `math.mMax()` function returns the larger of `x` or `y`.
+
+_**Output data type:** Float_
+
+```js
+import "math"
+
+math.mMax(x: 1.23, y: 4.56)
+
+// Returns 4.56
+```
+
+## Parameters
+
+### x
+The X value used in the operation.
+
+_**Data type:** Float_
+
+### y
+The Y value used in the operation.
+
+_**Data type:** Float_
+
+## Special cases
+```js
+math.mMax(x:x, y:+Inf) // Returns +Inf
+math.mMax(x: +Inf, y:y) // Returns +Inf
+math.mMax(x:x, y: NaN) // Returns NaN
+math.mMax(x: NaN, y:y) // Returns NaN
+math.mMax(x: +0, y: ±0) // Returns +0
+math.mMax(x: ±0, y: +0) // Returns +0
+math.mMax(x: -0, y: -0) // Returns -0
+```
diff --git a/content/v2.0/reference/flux/stdlib/math/mmin.md b/content/v2.0/reference/flux/stdlib/math/mmin.md
new file mode 100644
index 000000000..ace68ee42
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/math/mmin.md
@@ -0,0 +1,46 @@
+---
+title: math.mMin() function
+description: The math.mMin() function returns the smaller of `x` or `y`.
+aliases:
+ - /v2.0/reference/flux/functions/math/m_min/
+ - /v2.0/reference/flux/stdlib/math/m_min/
+menu:
+ v2_0_ref:
+ name: math.mMin
+ parent: Math
+weight: 301
+---
+
+The `math.mMin()` function returns the smaller of `x` or `y`.
+
+_**Output data type:** Float_
+
+```js
+import "math"
+
+math.mMin(x: 1.23, y: 4.56)
+
+// Returns 1.23
+```
+
+## Parameters
+
+### x
+The X value used in the operation.
+
+_**Data type:** Float_
+
+### y
+The Y value used in the operation.
+
+_**Data type:** Float_
+
+## Special cases
+```js
+math.mMin(x:x, y: -Inf) // Returns -Inf
+math.mMin(x: -Inf, y:y) // Returns -Inf
+math.mMin(x:x, y: NaN) // Returns NaN
+math.mMin(x: NaN, y:y) // Returns NaN
+math.mMin(x: -0, y: ±0) // Returns -0
+math.mMin(x: ±0, y: -0) // Returns -0
+```
diff --git a/content/v2.0/reference/flux/functions/math/mod.md b/content/v2.0/reference/flux/stdlib/math/mod.md
similarity index 94%
rename from content/v2.0/reference/flux/functions/math/mod.md
rename to content/v2.0/reference/flux/stdlib/math/mod.md
index e23493865..afef0a1ba 100644
--- a/content/v2.0/reference/flux/functions/math/mod.md
+++ b/content/v2.0/reference/flux/stdlib/math/mod.md
@@ -3,6 +3,8 @@ title: math.mod() function
description: >
The math.mod() function returns the floating-point remainder of `x`/`y`.
The magnitude of the result is less than `y` and its sign agrees with that of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/mod/
menu:
v2_0_ref:
name: math.mod
diff --git a/content/v2.0/reference/flux/functions/math/modf.md b/content/v2.0/reference/flux/stdlib/math/modf.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/modf.md
rename to content/v2.0/reference/flux/stdlib/math/modf.md
index 78e4ef7fd..94cf32fe9 100644
--- a/content/v2.0/reference/flux/functions/math/modf.md
+++ b/content/v2.0/reference/flux/stdlib/math/modf.md
@@ -3,6 +3,8 @@ title: math.modf() function
description: >
The math.modf() function returns integer and fractional floating-point numbers that sum to `f`.
Both values have the same sign as `f`.
+aliases:
+ - /v2.0/reference/flux/functions/math/modf/
menu:
v2_0_ref:
name: math.modf
diff --git a/content/v2.0/reference/flux/functions/math/nan.md b/content/v2.0/reference/flux/stdlib/math/nan.md
similarity index 85%
rename from content/v2.0/reference/flux/functions/math/nan.md
rename to content/v2.0/reference/flux/stdlib/math/nan.md
index ebb54f06b..714f3f676 100644
--- a/content/v2.0/reference/flux/functions/math/nan.md
+++ b/content/v2.0/reference/flux/stdlib/math/nan.md
@@ -1,6 +1,8 @@
---
title: math.NaN() function
description: The math.NaN() function returns an IEEE 754 “not-a-number” value.
+aliases:
+ - /v2.0/reference/flux/functions/math/nan/
menu:
v2_0_ref:
name: math.NaN
diff --git a/content/v2.0/reference/flux/functions/math/nextafter.md b/content/v2.0/reference/flux/stdlib/math/nextafter.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/nextafter.md
rename to content/v2.0/reference/flux/stdlib/math/nextafter.md
index bb6f0ae27..21810bda2 100644
--- a/content/v2.0/reference/flux/functions/math/nextafter.md
+++ b/content/v2.0/reference/flux/stdlib/math/nextafter.md
@@ -1,6 +1,8 @@
---
title: math.nextafter() function
description: The math.nextafter() function returns the next representable float value after `x` towards `y`.
+aliases:
+ - /v2.0/reference/flux/functions/math/nextafter/
menu:
v2_0_ref:
name: math.nextafter
diff --git a/content/v2.0/reference/flux/functions/math/pow.md b/content/v2.0/reference/flux/stdlib/math/pow.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/math/pow.md
rename to content/v2.0/reference/flux/stdlib/math/pow.md
index 3921a8fa1..af935c750 100644
--- a/content/v2.0/reference/flux/functions/math/pow.md
+++ b/content/v2.0/reference/flux/stdlib/math/pow.md
@@ -1,6 +1,8 @@
---
title: math.pow() function
description: The math.pow() function returns `x**y`, the base-x exponential of y.
+aliases:
+ - /v2.0/reference/flux/functions/math/pow/
menu:
v2_0_ref:
name: math.pow
diff --git a/content/v2.0/reference/flux/functions/math/pow10.md b/content/v2.0/reference/flux/stdlib/math/pow10.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/pow10.md
rename to content/v2.0/reference/flux/stdlib/math/pow10.md
index bfd8f1f63..234fb5e5b 100644
--- a/content/v2.0/reference/flux/functions/math/pow10.md
+++ b/content/v2.0/reference/flux/stdlib/math/pow10.md
@@ -1,6 +1,8 @@
---
title: math.pow10() function
description: The math.pow10() function returns `10**n`, the base-10 exponential of `n`.
+aliases:
+ - /v2.0/reference/flux/functions/math/pow10/
menu:
v2_0_ref:
name: math.pow10
diff --git a/content/v2.0/reference/flux/functions/math/remainder.md b/content/v2.0/reference/flux/stdlib/math/remainder.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/remainder.md
rename to content/v2.0/reference/flux/stdlib/math/remainder.md
index c7056aef3..84563bf28 100644
--- a/content/v2.0/reference/flux/functions/math/remainder.md
+++ b/content/v2.0/reference/flux/stdlib/math/remainder.md
@@ -1,6 +1,8 @@
---
title: math.remainder() function
description: The math.remainder() function returns the IEEE 754 floating-point remainder of `x / y`.
+aliases:
+ - /v2.0/reference/flux/functions/math/remainder/
menu:
v2_0_ref:
name: math.remainder
diff --git a/content/v2.0/reference/flux/functions/math/round.md b/content/v2.0/reference/flux/stdlib/math/round.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/round.md
rename to content/v2.0/reference/flux/stdlib/math/round.md
index 92e040360..c4c60d2ab 100644
--- a/content/v2.0/reference/flux/functions/math/round.md
+++ b/content/v2.0/reference/flux/stdlib/math/round.md
@@ -1,6 +1,8 @@
---
title: math.round() function
description: The math.round() function returns the nearest integer, rounding half away from zero.
+aliases:
+ - /v2.0/reference/flux/functions/math/round/
menu:
v2_0_ref:
name: math.round
diff --git a/content/v2.0/reference/flux/functions/math/roundtoeven.md b/content/v2.0/reference/flux/stdlib/math/roundtoeven.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/roundtoeven.md
rename to content/v2.0/reference/flux/stdlib/math/roundtoeven.md
index 5f78da74a..381470463 100644
--- a/content/v2.0/reference/flux/functions/math/roundtoeven.md
+++ b/content/v2.0/reference/flux/stdlib/math/roundtoeven.md
@@ -1,6 +1,8 @@
---
title: math.roundtoeven() function
description: The math.roundtoeven() function returns the nearest integer, rounding ties to even.
+aliases:
+ - /v2.0/reference/flux/functions/math/roundtoeven/
menu:
v2_0_ref:
name: math.roundtoeven
diff --git a/content/v2.0/reference/flux/functions/math/signbit.md b/content/v2.0/reference/flux/stdlib/math/signbit.md
similarity index 88%
rename from content/v2.0/reference/flux/functions/math/signbit.md
rename to content/v2.0/reference/flux/stdlib/math/signbit.md
index cefb73d90..9826c4e75 100644
--- a/content/v2.0/reference/flux/functions/math/signbit.md
+++ b/content/v2.0/reference/flux/stdlib/math/signbit.md
@@ -1,6 +1,8 @@
---
title: math.signbit() function
description: The math.signbit() function reports whether `x` is negative or negative zero.
+aliases:
+ - /v2.0/reference/flux/functions/math/signbit/
menu:
v2_0_ref:
name: math.signbit
diff --git a/content/v2.0/reference/flux/functions/math/sin.md b/content/v2.0/reference/flux/stdlib/math/sin.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/sin.md
rename to content/v2.0/reference/flux/stdlib/math/sin.md
index 033a005e8..e06aaec4c 100644
--- a/content/v2.0/reference/flux/functions/math/sin.md
+++ b/content/v2.0/reference/flux/stdlib/math/sin.md
@@ -1,6 +1,8 @@
---
title: math.sin() function
description: The math.sin() function returns the sine of the radian argument `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/sin/
menu:
v2_0_ref:
name: math.sin
diff --git a/content/v2.0/reference/flux/functions/math/sincos.md b/content/v2.0/reference/flux/stdlib/math/sincos.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/sincos.md
rename to content/v2.0/reference/flux/stdlib/math/sincos.md
index 7d1e25ff2..5536d6001 100644
--- a/content/v2.0/reference/flux/functions/math/sincos.md
+++ b/content/v2.0/reference/flux/stdlib/math/sincos.md
@@ -1,6 +1,8 @@
---
title: math.sincos() function
description: The math.sincos() function returns the values of `math.sin(x:x)` and `math.cos(x:x)`.
+aliases:
+ - /v2.0/reference/flux/functions/math/sincos/
menu:
v2_0_ref:
name: math.sincos
diff --git a/content/v2.0/reference/flux/functions/math/sinh.md b/content/v2.0/reference/flux/stdlib/math/sinh.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/sinh.md
rename to content/v2.0/reference/flux/stdlib/math/sinh.md
index 7dd4def04..800ae94a2 100644
--- a/content/v2.0/reference/flux/functions/math/sinh.md
+++ b/content/v2.0/reference/flux/stdlib/math/sinh.md
@@ -1,6 +1,8 @@
---
title: math.sinh() function
description: The math.sinh() function returns the hyperbolic sine of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/sinh/
menu:
v2_0_ref:
name: math.sinh
diff --git a/content/v2.0/reference/flux/functions/math/sqrt.md b/content/v2.0/reference/flux/stdlib/math/sqrt.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/sqrt.md
rename to content/v2.0/reference/flux/stdlib/math/sqrt.md
index f09e59fa2..88e650c06 100644
--- a/content/v2.0/reference/flux/functions/math/sqrt.md
+++ b/content/v2.0/reference/flux/stdlib/math/sqrt.md
@@ -1,6 +1,8 @@
---
title: math.sqrt() function
description: The math.sqrt() function returns the square root of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/sqrt/
menu:
v2_0_ref:
name: math.sqrt
diff --git a/content/v2.0/reference/flux/functions/math/tan.md b/content/v2.0/reference/flux/stdlib/math/tan.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/tan.md
rename to content/v2.0/reference/flux/stdlib/math/tan.md
index 3fbb9d970..f55bb765f 100644
--- a/content/v2.0/reference/flux/functions/math/tan.md
+++ b/content/v2.0/reference/flux/stdlib/math/tan.md
@@ -1,6 +1,8 @@
---
title: math.tan() function
description: The math.tan() function returns the tangent of the radian argument `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/tan/
menu:
v2_0_ref:
name: math.tan
diff --git a/content/v2.0/reference/flux/functions/math/tanh.md b/content/v2.0/reference/flux/stdlib/math/tanh.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/math/tanh.md
rename to content/v2.0/reference/flux/stdlib/math/tanh.md
index a468720b0..884972b1d 100644
--- a/content/v2.0/reference/flux/functions/math/tanh.md
+++ b/content/v2.0/reference/flux/stdlib/math/tanh.md
@@ -1,6 +1,8 @@
---
title: math.tanh() function
description: The math.tanh() function returns the hyperbolic tangent of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/tanh/
menu:
v2_0_ref:
name: math.tanh
diff --git a/content/v2.0/reference/flux/functions/math/trunc.md b/content/v2.0/reference/flux/stdlib/math/trunc.md
similarity index 90%
rename from content/v2.0/reference/flux/functions/math/trunc.md
rename to content/v2.0/reference/flux/stdlib/math/trunc.md
index e787dbd80..a4c98d682 100644
--- a/content/v2.0/reference/flux/functions/math/trunc.md
+++ b/content/v2.0/reference/flux/stdlib/math/trunc.md
@@ -1,6 +1,8 @@
---
title: math.trunc() function
description: The math.trunc() function returns the integer value of `x`.
+aliases:
+ - /v2.0/reference/flux/functions/math/trunc/
menu:
v2_0_ref:
name: math.trunc
diff --git a/content/v2.0/reference/flux/functions/math/y0.md b/content/v2.0/reference/flux/stdlib/math/y0.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/y0.md
rename to content/v2.0/reference/flux/stdlib/math/y0.md
index 7dc7fab13..4dff9fdb7 100644
--- a/content/v2.0/reference/flux/functions/math/y0.md
+++ b/content/v2.0/reference/flux/stdlib/math/y0.md
@@ -1,6 +1,8 @@
---
title: math.y0() function
description: The math.y0() function returns the order-zero Bessel function of the second kind.
+aliases:
+ - /v2.0/reference/flux/functions/math/y0/
menu:
v2_0_ref:
name: math.y0
diff --git a/content/v2.0/reference/flux/functions/math/y1.md b/content/v2.0/reference/flux/stdlib/math/y1.md
similarity index 92%
rename from content/v2.0/reference/flux/functions/math/y1.md
rename to content/v2.0/reference/flux/stdlib/math/y1.md
index b16a38c99..84b79ca1e 100644
--- a/content/v2.0/reference/flux/functions/math/y1.md
+++ b/content/v2.0/reference/flux/stdlib/math/y1.md
@@ -1,6 +1,8 @@
---
title: math.y1() function
description: The math.y1() function returns the order-one Bessel function of the second kind.
+aliases:
+ - /v2.0/reference/flux/functions/math/y1/
menu:
v2_0_ref:
name: math.y1
diff --git a/content/v2.0/reference/flux/functions/math/yn.md b/content/v2.0/reference/flux/stdlib/math/yn.md
similarity index 93%
rename from content/v2.0/reference/flux/functions/math/yn.md
rename to content/v2.0/reference/flux/stdlib/math/yn.md
index 56b951ca5..63e9e14ce 100644
--- a/content/v2.0/reference/flux/functions/math/yn.md
+++ b/content/v2.0/reference/flux/stdlib/math/yn.md
@@ -1,6 +1,8 @@
---
title: math.yn() function
description: The math.yn() function returns the order-n Bessel function of the second kind.
+aliases:
+ - /v2.0/reference/flux/functions/math/yn/
menu:
v2_0_ref:
name: math.yn
diff --git a/content/v2.0/reference/flux/stdlib/monitor/_index.md b/content/v2.0/reference/flux/stdlib/monitor/_index.md
new file mode 100644
index 000000000..b5296bc87
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux InfluxDB Monitor package
+list_title: InfluxDB Monitor package
+description: >
+ The Flux Monitor package provides tools for monitoring and alerting with InfluxDB.
+ Import the `influxdata/influxdb/monitor` package.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/
+menu:
+ v2_0_ref:
+ name: InfluxDB Monitor
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, monitor, alerts, package]
+---
+
+The Flux monitor package provides tools for monitoring and alerting with InfluxDB.
+Import the `influxdata/influxdb/monitor` package:
+
+```js
+import "influxdata/influxdb/monitor"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/monitor/check.md b/content/v2.0/reference/flux/stdlib/monitor/check.md
new file mode 100644
index 000000000..5dcd50a5f
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/check.md
@@ -0,0 +1,98 @@
+---
+title: monitor.check() function
+description: >
+ The `monitor.check()` function checks input data and assigns a level
+ (`ok`, `info`, `warn`, or `crit`) to each row based on predicate functions.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/check/
+menu:
+ v2_0_ref:
+ name: monitor.check
+ parent: InfluxDB Monitor
+weight: 202
+---
+
+The `monitor.check()` function checks input data and assigns a level
+(`ok`, `info`, `warn`, or `crit`) to each row based on predicate functions.
+
+_**Function type:** Transformation_
+
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.check(
+ crit: (r) => r._value > 90.0,
+ warn: (r) => r._value > 80.0,
+ info: (r) => r._value > 60.0,
+ ok: (r) => r._value <= 20.0,
+ messageFn: (r) => "The current level is ${r._level}",
+ data: {}
+)
+```
+
+`monitor.check()` stores statuses in the `_level` column and writes results
+to the `statuses` measurement in the `_monitoring` bucket.
+
+## Parameters
+
+### crit
+Predicate function that determines `crit` status.
+Default is `(r) => false`.
+
+_**Data type:** Function_
+
+### warn
+Predicate function that determines `warn` status.
+Default is `(r) => false`.
+
+_**Data type:** Function_
+
+### info
+Predicate function that determines `info` status.
+Default is `(r) => false`.
+
+_**Data type:** Function_
+
+### ok
+Predicate function that determines `ok` status.
+Default is `(r) => true`.
+
+_**Data type:** Function_
+
+### messageFn
+A function that constructs a message to append to each row.
+The message is stored in the `_message` column.
+
+_**Data type:** Function_
+
+### data
+Data to append to the output.
+**InfluxDB populates check data.**
+
+_**Data type:** Object_
+
+## Examples
+
+### Monitor disk usage
+```js
+import "influxdata/influxdb/monitor"
+
+from(bucket: "telegraf")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "disk" and
+ r._field == "used_percent"
+ )
+ |> group(columns: ["_measurement"])
+ |> monitor.check(
+ crit: (r) => r._value > 90.0,
+ warn: (r) => r._value > 80.0,
+ info: (r) => r._value > 70.0,
+ ok: (r) => r._value <= 60.0,
+ messageFn: (r) =>
+ if r._level == "crit" then "Critical alert!! Disk usage is at ${r._value}%!"
+ else if r._level == "warn" then "Warning! Disk usage is at ${r._value}%."
+ else if r._level == "info" then "Disk usage is at ${r._value}%."
+ else "Things are looking good."
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/monitor/deadman.md b/content/v2.0/reference/flux/stdlib/monitor/deadman.md
new file mode 100644
index 000000000..870c90ae7
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/deadman.md
@@ -0,0 +1,48 @@
+---
+title: monitor.deadman() function
+description: >
+ The `monitor.deadman()` function detects when a group stops reporting data.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/deadman/
+menu:
+ v2_0_ref:
+ name: monitor.deadman
+ parent: InfluxDB Monitor
+weight: 202
+cloud_all: true
+---
+
+The `monitor.deadman()` function detects when a group stops reporting data.
+It takes a stream of tables and reports whether each group has been observed since time `t`.
+
+_**Function type:** Transformation_
+
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.deadman(t: 2019-08-30T12:30:00Z)
+```
+
+`monitor.deadman()` retains the most recent row from each input table and adds a `dead` column.
+If a record appears **after** time `t`, `monitor.deadman()` sets `dead` to `false`.
+Otherwise, `dead` is set to `true`.
+
+## Parameters
+
+### t
+The time threshold for the deadman check.
+
+_**Data type:** Time_
+
+## Examples
+
+### Detect if a host hasn't reported in the last five minutes
+```js
+import "influxdata/influxdb/monitor"
+import "experimental"
+
+from(bucket: "example-bucket")
+ |> range(start: -10m)
+ |> group(columns: ["host"])
+ |> monitor.deadman(t: experimental.subDuration(d: 5m, from: now()))
+```
diff --git a/content/v2.0/reference/flux/stdlib/monitor/from.md b/content/v2.0/reference/flux/stdlib/monitor/from.md
new file mode 100644
index 000000000..0dcf3c575
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/from.md
@@ -0,0 +1,71 @@
+---
+title: monitor.from() function
+description: >
+ The `monitor.from()` function retrieves check statuses stored in the `statuses`
+ measurement in the `_monitoring` bucket.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/from/
+menu:
+ v2_0_ref:
+ name: monitor.from
+ parent: InfluxDB Monitor
+weight: 202
+---
+
+The `monitor.from()` function retrieves check statuses stored in the `statuses`
+measurement in the `_monitoring` bucket.
+
+_**Function type:** Input_
+
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.from(
+ start: -1h,
+ stop: now(),
+ fn: (r) => true
+)
+```
+
+## Parameters
+
+### start
+The earliest time to include in results.
+Use a relative duration or absolute time.
+For example, `-1h` or `2019-08-28T22:00:00Z`.
+Durations are relative to `now()`.
+
+_**Data type:** Duration | Time_
+
+### stop
+The latest time to include in results.
+Use a relative duration or absolute time.
+For example, `-1h` or `2019-08-28T22:00:00Z`.
+Durations are relative to `now()`.
+Defaults to `now()`.
+
+_**Data type:** Duration | Time_
+
+{{% note %}}
+Time values in Flux must be in [RFC3339 format](/v2.0/reference/flux/language/types#timestamp-format).
+{{% /note %}}
+
+### fn
+A single argument predicate function that evaluates `true` or `false`.
+Records or rows (`r`) that evaluate to `true` are included in output tables.
+Records that evaluate to _null_ or `false` are not included in output tables.
+
+_**Data type:** Function_
+
+## Examples
+
+### View critical check statuses from the last hour
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.from(
+ start: -1h,
+ fn: (r) => r._level == "crit"
+)
+```
diff --git a/content/v2.0/reference/flux/stdlib/monitor/logs.md b/content/v2.0/reference/flux/stdlib/monitor/logs.md
new file mode 100644
index 000000000..bcf6c4861
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/logs.md
@@ -0,0 +1,67 @@
+---
+title: monitor.logs() function
+description: >
+ The `monitor.logs()` function retrieves notification events stored in the `notifications`
+ measurement in the `_monitoring` bucket.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/logs/
+menu:
+ v2_0_ref:
+ name: monitor.logs
+ parent: InfluxDB Monitor
+weight: 202
+---
+
+The `monitor.logs()` function retrieves notification events stored in the `notifications`
+measurement in the `_monitoring` bucket.
+
+_**Function type:** Input_
+
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.logs(
+ start: -1h,
+ stop: now(),
+ fn: (r) => true
+)
+```
+
+## Parameters
+
+### start
+The earliest time to include in results.
+Use a relative duration or absolute time.
+For example, `-1h` or `2019-08-28T22:00:00Z`.
+Durations are relative to `now()`.
+
+_**Data type:** Duration | Time_
+
+### stop
+The latest time to include in results.
+Use a relative duration or absolute time.
+For example, `-1h` or `2019-08-28T22:00:00Z`.
+Durations are relative to `now()`.
+Defaults to `now()`.
+
+_**Data type:** Duration | Time_
+
+{{% note %}}
+Time values in Flux must be in [RFC3339 format](/v2.0/reference/flux/language/types#timestamp-format).
+{{% /note %}}
+
+### fn
+A single argument predicate function that evaluates `true` or `false`.
+Records or rows (`r`) that evaluate to `true` are included in output tables.
+Records that evaluate to _null_ or `false` are not included in output tables.
+
+_**Data type:** Function_
+
+## Examples
+
+### Query notification events from the last hour
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.logs(start: -1h)
+```
diff --git a/content/v2.0/reference/flux/stdlib/monitor/notify.md b/content/v2.0/reference/flux/stdlib/monitor/notify.md
new file mode 100644
index 000000000..8d845a80c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/notify.md
@@ -0,0 +1,54 @@
+---
+title: monitor.notify() function
+description: >
+ The `monitor.notify()` function sends a notification to an endpoint and logs it
+ in the `notifications` measurement in the `_monitoring` bucket.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/notify/
+menu:
+ v2_0_ref:
+ name: monitor.notify
+ parent: InfluxDB Monitor
+weight: 202
+---
+
+The `monitor.notify()` function sends a notification to an endpoint and logs it
+in the `notifications` measurement in the `_monitoring` bucket.
+
+_**Function type:** Output_
+
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.notify(
+ endpoint: endpoint,
+ data: {}
+)
+```
+
+## Parameters
+
+### endpoint
+A function that constructs and sends the notification to an endpoint.
+
+_**Data type:** Function_
+
+### data
+Data to append to the output.
+**InfluxDB populates notification data.**
+
+_**Data type:** Object_
+
+## Examples
+
+### Send a notification to Slack
+```js
+import "influxdata/influxdb/monitor"
+import "slack"
+
+endpoint = slack.endpoint(name: "slack", channel: "#flux")
+
+from(bucket: "system")
+ |> range(start: -5m)
+ |> monitor.notify(endpoint: endpoint)
+```
diff --git a/content/v2.0/reference/flux/stdlib/monitor/statechanges.md b/content/v2.0/reference/flux/stdlib/monitor/statechanges.md
new file mode 100644
index 000000000..b2e4573a8
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/monitor/statechanges.md
@@ -0,0 +1,56 @@
+---
+title: monitor.stateChanges() function
+description: >
+ The `monitor.stateChanges()` function detects state changes in a stream of data and
+ outputs records that change from `fromLevel` to `toLevel`.
+aliases:
+ - /v2.0/reference/flux/functions/monitor/statechanges/
+menu:
+ v2_0_ref:
+ name: monitor.stateChanges
+ parent: InfluxDB Monitor
+weight: 202
+cloud_all: true
+---
+
+The `monitor.stateChanges()` function detects state changes in a stream of data and
+outputs records that change from `fromLevel` to `toLevel`.
+
+{{% note %}}
+`monitor.stateChanges()` operates on data in the `statuses` measurement and requires a `_level` column.
+{{% /note %}}
+
+_**Function type:** Transformation_
+
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.stateChanges(
+ fromLevel: "any",
+ toLevel: "crit"
+)
+```
+
+## Parameters
+
+### fromLevel
+The level to detect a change from.
+Defaults to `"any"`.
+
+_**Data type:** String_
+
+### toLevel
+The level to detect a change to.
+The function outputs records that change to this level.
+
+_**Data type:** String_
+
+## Examples
+
+### Detect when the state changes to critical
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.from(start: -1h)
+ |> monitor.stateChanges(toLevel: "crit")
+```
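+
+### Detect when a critical status recovers
+A minimal sketch using the same documented parameters; it should return records
+whose level changes from `crit` back to `ok` within the queried range.
+```js
+import "influxdata/influxdb/monitor"
+
+monitor.from(start: -1h)
+  |> monitor.stateChanges(fromLevel: "crit", toLevel: "ok")
+```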
diff --git a/content/v2.0/reference/flux/stdlib/pagerduty/_index.md b/content/v2.0/reference/flux/stdlib/pagerduty/_index.md
new file mode 100644
index 000000000..a84ddf85d
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/pagerduty/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux PagerDuty package
+list_title: PagerDuty package
+description: >
+ The Flux PagerDuty package provides functions for sending data to PagerDuty.
+ Import the `pagerduty` package.
+aliases:
+ - /v2.0/reference/flux/functions/pagerduty/
+menu:
+ v2_0_ref:
+ name: PagerDuty
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, pagerduty, package]
+---
+
+The Flux PagerDuty package provides functions for sending data to PagerDuty.
+Import the `pagerduty` package:
+
+```js
+import "pagerduty"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/pagerduty/actionfromseverity.md b/content/v2.0/reference/flux/stdlib/pagerduty/actionfromseverity.md
new file mode 100644
index 000000000..641083e8c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/pagerduty/actionfromseverity.md
@@ -0,0 +1,44 @@
+---
+title: pagerduty.actionFromSeverity() function
+description: >
+ The `pagerduty.actionFromSeverity()` function converts a severity to a PagerDuty action.
+aliases:
+ - /v2.0/reference/flux/functions/pagerduty/actionfromseverity/
+menu:
+ v2_0_ref:
+ name: pagerduty.actionFromSeverity
+ parent: PagerDuty
+weight: 202
+---
+
+The `pagerduty.actionFromSeverity()` function converts a severity to a PagerDuty action.
+`ok` converts to `resolve`.
+All other severities convert to `trigger`.
+
+_**Function type:** Transformation_
+
+```js
+import "pagerduty"
+
+pagerduty.actionFromSeverity(
+ severity: "ok"
+)
+
+// Returns "resolve"
+```
+
+## Parameters
+
+### severity
+The severity to convert to a PagerDuty action.
+
+_**Data type:** String_
+
+## Function definition
+```js
+import "strings"
+
+actionFromSeverity = (severity) =>
+ if strings.toLower(v: severity) == "ok" then "resolve"
+ else "trigger"
+```
diff --git a/content/v2.0/reference/flux/stdlib/pagerduty/dedupkey.md b/content/v2.0/reference/flux/stdlib/pagerduty/dedupkey.md
new file mode 100644
index 000000000..f34873a08
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/pagerduty/dedupkey.md
@@ -0,0 +1,38 @@
+---
+title: pagerduty.dedupKey() function
+description: >
+ The `pagerduty.dedupKey()` function uses the group key of an input table to
+ generate and store a deduplication key in the `_pagerdutyDedupKey` column.
+aliases:
+ - /v2.0/reference/flux/functions/pagerduty/dedupkey/
+menu:
+ v2_0_ref:
+ name: pagerduty.dedupKey
+ parent: PagerDuty
+weight: 202
+---
+
+The `pagerduty.dedupKey()` function uses the group key of an input table to
+generate and store a deduplication key in the `_pagerdutyDedupKey` column.
+The function sorts, newline-concatenates, SHA256-hashes, and hex-encodes
+the group key to create a unique deduplication key for each input table.
+
+_**Function type:** Transformation_
+
+```js
+import "pagerduty"
+
+pagerduty.dedupKey()
+```
+
+## Examples
+
+##### Add a PagerDuty deduplication key to output data
+```js
+import "pagerduty"
+
+from(bucket: "default")
+ |> range(start: -5m)
+ |> filter(fn: (r) => r._measurement == "mem")
+ |> pagerduty.dedupKey()
+```
diff --git a/content/v2.0/reference/flux/stdlib/pagerduty/endpoint.md b/content/v2.0/reference/flux/stdlib/pagerduty/endpoint.md
new file mode 100644
index 000000000..e42bd1e7a
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/pagerduty/endpoint.md
@@ -0,0 +1,75 @@
+---
+title: pagerduty.endpoint() function
+description: >
+ The `pagerduty.endpoint()` function sends a message to PagerDuty that includes output data.
+aliases:
+ - /v2.0/reference/flux/functions/pagerduty/endpoint/
+menu:
+ v2_0_ref:
+ name: pagerduty.endpoint
+ parent: PagerDuty
+weight: 202
+v2.0/tags: [endpoints]
+---
+
+The `pagerduty.endpoint()` function sends a message to PagerDuty that includes output data.
+
+_**Function type:** Output_
+
+```js
+import "pagerduty"
+
+pagerduty.endpoint(
+ url: "https://events.pagerduty.com/v2/enqueue"
+)
+```
+
+## Parameters
+
+### url
+The PagerDuty API URL.
+Defaults to `https://events.pagerduty.com/v2/enqueue`.
+
+_**Data type:** String_
+
+### mapFn
+A function that builds the object used to generate the POST request.
+
+{{% note %}}
+_You should rarely need to override the default `mapFn` parameter.
+To see the default `mapFn` value or for insight into possible overrides, view the
+[`pagerduty.endpoint()` source code](https://github.com/influxdata/flux/blob/master/stdlib/pagerduty/pagerduty.flux)._
+{{% /note %}}
+
+_**Data type:** Function_
+
+The returned object must include the following fields:
+
+- `routingKey`
+- `client`
+- `client_url`
+- `class`
+- `group`
+- `severity`
+- `component`
+- `source`
+- `summary`
+- `timestamp`
+
+_For more information, see [`pagerduty.sendEvent()`](/v2.0/reference/flux/stdlib/pagerduty/sendevent/)_
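+
+A minimal sketch of a `mapFn` override based on the parameters documented above,
+assuming the piped-forward data comes from `monitor.check()` (so `_level` and
+`_message` columns exist). All literal values below are placeholders:
+
+```js
+import "pagerduty"
+
+endpoint = pagerduty.endpoint(
+  mapFn: (r) => ({
+    routingKey: "ExampleRoutingKey",                         // placeholder routing key
+    client: "example-client",                                // placeholder client name
+    client_url: "https://example-client-url.com",            // placeholder client URL
+    class: "example-class",                                  // placeholder event class
+    group: "example-group",                                  // placeholder logical group
+    severity: pagerduty.severityFromLevel(level: r._level),  // convert the InfluxDB level to a PagerDuty severity
+    component: "example-component",                          // placeholder component
+    source: "example-source",                                // placeholder source
+    summary: r._message,                                     // message generated by monitor.check()
+    timestamp: "2019-08-28T22:00:00.000+0000",               // placeholder RFC3339nano timestamp
+  })
+)
+```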
+
+## Examples
+
+##### Send critical statuses to a PagerDuty endpoint
+```js
+import "monitor"
+import "pagerduty"
+
+endpoint = pagerduty.endpoint(token: "mySuPerSecRetTokEn")
+
+from(bucket: "example-bucket")
+ |> range(start: -1m)
+ |> filter(fn: (r) => r._measurement == "statuses" and status == "crit")
+ |> map(fn: (r) => { return {r with status: r._status} })
+ |> monitor.notify(endpoint: endpoint)
+```
diff --git a/content/v2.0/reference/flux/stdlib/pagerduty/sendevent.md b/content/v2.0/reference/flux/stdlib/pagerduty/sendevent.md
new file mode 100644
index 000000000..51bf4abaf
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/pagerduty/sendevent.md
@@ -0,0 +1,131 @@
+---
+title: pagerduty.sendEvent() function
+description: >
+ The `pagerduty.sendEvent()` function sends an event to PagerDuty.
+aliases:
+ - /v2.0/reference/flux/functions/pagerduty/sendevent/
+menu:
+ v2_0_ref:
+ name: pagerduty.sendEvent
+ parent: PagerDuty
+weight: 202
+---
+
+The `pagerduty.sendEvent()` function sends an event to PagerDuty.
+
+_**Function type:** Output_
+
+```js
+import "pagerduty"
+
+pagerduty.sendEvent(
+ pagerdutyURL: "https://events.pagerduty.com/v2/enqueue",
+ routingKey: "ExampleRoutingKey",
+ client: "ExampleClient",
+ clientURL: "http://examplepagerdutyclient.com",
+ dedupkey: "ExampleDedupKey",
+ class: "cpu usage",
+ group: "app-stack",
+ severity: "ok",
+ component: "postgres",
+ source: "monitoringtool:vendor:region",
+ summary: "This is an example summary.",
+ timestamp: "2016-07-17T08:42:58.315+0000"
+)
+```
+
+## Parameters
+
+### pagerdutyURL
+The URL of the PagerDuty endpoint.
+Defaults to `https://events.pagerduty.com/v2/enqueue`.
+
+_**Data type:** String_
+
+### routingKey
+The routing key generated from your PagerDuty integration.
+
+_**Data type:** String_
+
+### client
+The name of the client sending the alert.
+
+_**Data type:** String_
+
+### clientURL
+The URL of the client sending the alert.
+
+_**Data type:** String_
+
+### dedupkey
+A per-alert ID that acts as a deduplication key and allows you to acknowledge or
+change the severity of previous messages.
+Supports a maximum of 255 characters.
+
+{{% note %}}
+When using [`pagerduty.endpoint()`](/v2.0/reference/flux/stdlib/pagerduty/endpoint/)
+to send data to PagerDuty, the function uses the [`pagerduty.dedupKey()` function](/v2.0/reference/flux/stdlib/pagerduty/dedupkey/) to populate the `dedupkey` parameter.
+{{% /note %}}
+
+_**Data type:** String_
+
+### class
+The class or type of the event.
+Classes are user-defined.
+For example, `ping failure` or `cpu load`.
+
+_**Data type:** String_
+
+### group
+A logical grouping used by PagerDuty.
+Groups are user-defined.
+For example, `app-stack`.
+
+_**Data type:** String_
+
+### severity
+The severity of the event.
+
+**Valid values include:**
+
+- `critical`
+- `error`
+- `warning`
+- `info`
+
+_**Data type:** String_
+
+### eventAction
+The type of event to send to PagerDuty.
+
+**Valid values include:**
+
+- `trigger`
+- `resolve`
+- `acknowledge`
+
+_**Data type:** String_
+
+### component
+The component of the source machine responsible for the event.
+Components are user-defined.
+For example, `mysql` or `eth0`.
+
+_**Data type:** String_
+
+### source
+The unique location of the affected system.
+For example, the hostname or fully qualified domain name (FQDN).
+
+_**Data type:** String_
+
+### summary
+A brief text summary of the event, used as the summary or title of associated alerts.
+The maximum permitted length is 1024 characters.
+
+_**Data type:** String_
+
+### timestamp
+The time the detected event occurred in [RFC3339nano format](https://golang.org/pkg/time/#RFC3339Nano).
+
+_**Data type:** String_
diff --git a/content/v2.0/reference/flux/stdlib/pagerduty/severityfromlevel.md b/content/v2.0/reference/flux/stdlib/pagerduty/severityfromlevel.md
new file mode 100644
index 000000000..7e1d829ad
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/pagerduty/severityfromlevel.md
@@ -0,0 +1,57 @@
+---
+title: pagerduty.severityFromLevel() function
+description: >
+ The `pagerduty.severityFromLevel()` function converts an InfluxDB status level to
+ a PagerDuty severity.
+aliases:
+ - /v2.0/reference/flux/functions/pagerduty/severityfromlevel/
+menu:
+ v2_0_ref:
+ name: pagerduty.severityFromLevel
+ parent: PagerDuty
+weight: 202
+---
+
+The `pagerduty.severityFromLevel()` function converts an InfluxDB status level to
+a PagerDuty severity.
+
+_**Function type:** Transformation_
+
+```js
+import "pagerduty"
+
+pagerduty.severityFromLevel(
+ level: "crit"
+)
+
+// Returns "critical"
+```
+
+| Status level | PagerDuty severity |
+|:------------:|:------------------:|
+| `crit` | `critical` |
+| `warn` | `warning` |
+| `info` | `info` |
+| `ok` | `info` |
+
+## Parameters
+
+### level
+The InfluxDB status level to convert to a PagerDuty severity.
+
+_**Data type:** String_
+
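+## Examples
+
+###### Convert InfluxDB status levels in a table
+The following is a minimal sketch that assumes input tables include a
+hypothetical `_level` column containing InfluxDB status levels:
+
+```js
+import "pagerduty"
+
+data
+  |> map(fn: (r) => ({
+      r with
+      severity: pagerduty.severityFromLevel(level: r._level)
+    })
+  )
+```
+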
+## Function definition
+```js
+import "strings"
+
+severityFromLevel = (level) => {
+  lvl = strings.toLower(v: level)
+ sev = if lvl == "warn" then "warning"
+ else if lvl == "crit" then "critical"
+ else if lvl == "info" then "info"
+ else if lvl == "ok" then "info"
+ else "error"
+ return sev
+}
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/_index.md b/content/v2.0/reference/flux/stdlib/regexp/_index.md
new file mode 100644
index 000000000..079929ca2
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux regular expressions package
+list_title: Regular expressions package
+description: >
+ The Flux regular expressions package includes functions that provide enhanced
+ regular expression functionality. Import the `regexp` package.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/
+menu:
+ v2_0_ref:
+ name: Regular expressions
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [regex, functions]
+---
+
+The Flux regular expressions package includes functions that provide enhanced
+regular expression functionality. Import the `regexp` package:
+
+```js
+import "regexp"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/regexp/compile.md b/content/v2.0/reference/flux/stdlib/regexp/compile.md
new file mode 100644
index 000000000..93dea117d
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/compile.md
@@ -0,0 +1,52 @@
+---
+title: regexp.compile() function
+description: >
+ The `regexp.compile()` function parses a regular expression and, if successful,
+ returns a Regexp object that can be used to match against text.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/compile/
+menu:
+ v2_0_ref:
+ name: regexp.compile
+ parent: Regular expressions
+weight: 301
+---
+
+The `regexp.compile()` function parses a regular expression and, if successful,
+returns a Regexp object that can be used to match against text.
+
+_**Output data type:** Regexp_
+
+```js
+import "regexp"
+
+regexp.compile(v: "abcd")
+
+// Returns the regexp object `abcd`
+```
+
+## Parameters
+
+### v
+The string value to parse into a regular expression.
+
+_**Data type:** String_
+
+## Examples
+
+###### Use a string value as a regular expression
+```js
+import "regexp"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ regexStr: r.regexStr,
+ _value: r._value,
+      firstRegexMatch: regexp.findString(
+        r: regexp.compile(v: r.regexStr),
+ v: r._value
+ )
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/findstring.md b/content/v2.0/reference/flux/stdlib/regexp/findstring.md
new file mode 100644
index 000000000..d26a1a688
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/findstring.md
@@ -0,0 +1,53 @@
+---
+title: regexp.findString() function
+description: The `regexp.findString()` function returns the left-most regular expression match in a string.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/findstring/
+menu:
+ v2_0_ref:
+ name: regexp.findString
+ parent: Regular expressions
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/regexp/splitregexp
+---
+
+The `regexp.findString()` function returns the left-most regular expression match in a string.
+
+_**Output data type:** String_
+
+```js
+import "regexp"
+
+regexp.findString(r: /foo.?/, v: "seafood fool")
+
+// Returns "food"
+```
+
+## Parameters
+
+### r
+The regular expression used to search `v`.
+
+_**Data type:** Regexp_
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+## Examples
+
+###### Find the first regular expression match in each row
+```js
+import "regexp"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ message: r.message,
+ regexp: r.regexp,
+ match: regexp.findString(r: r.regexp, v: r.message)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/findstringindex.md b/content/v2.0/reference/flux/stdlib/regexp/findstringindex.md
new file mode 100644
index 000000000..80e4d4ff1
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/findstringindex.md
@@ -0,0 +1,59 @@
+---
+title: regexp.findStringIndex() function
+description: >
+ The `regexp.findStringIndex()` function returns a two-element array of integers defining
+ the beginning and ending indexes of the left-most regular expression match in a string.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/findstringindex/
+menu:
+ v2_0_ref:
+ name: regexp.findStringIndex
+ parent: Regular expressions
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/regexp/compile
+---
+
+The `regexp.findStringIndex()` function returns a two-element array of integers defining
+the beginning and ending indexes of the left-most regular expression match in a string.
+
+_**Output data type:** Array of Integers_
+
+```js
+import "regexp"
+
+regexp.findStringIndex(r: /ab?/, v: "tablet")
+
+// Returns [1, 3]
+```
+
+## Parameters
+
+### r
+The regular expression used to search `v`.
+
+_**Data type:** Regexp_
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+## Examples
+
+###### Index the bounds of the first regular expression match in each row
+```js
+import "regexp"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ regexStr: r.regexStr,
+ _value: r._value,
+ matchIndex: regexp.findStringIndex(
+        r: regexp.compile(v: r.regexStr),
+ v: r._value
+ )
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/getstring.md b/content/v2.0/reference/flux/stdlib/regexp/getstring.md
new file mode 100644
index 000000000..7c3c39992
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/getstring.md
@@ -0,0 +1,47 @@
+---
+title: regexp.getString() function
+description: The `regexp.getString()` function returns the source string used to compile a regular expression.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/getstring/
+menu:
+ v2_0_ref:
+ name: regexp.getString
+ parent: Regular expressions
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/regexp/compile
+---
+
+The `regexp.getString()` function returns the source string used to compile a regular expression.
+
+_**Output data type:** String_
+
+```js
+import "regexp"
+
+regexp.getString(r: /[a-zA-Z]/)
+
+// Returns "[a-zA-Z]"
+```
+
+## Parameters
+
+### r
+The regular expression object to convert to a string.
+
+_**Data type:** Regexp_
+
+## Examples
+
+###### Convert regular expressions into strings in each row
+```js
+import "regexp"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ regex: r.regex,
+ regexStr: regexp.getString(r: r.regex)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/matchregexpstring.md b/content/v2.0/reference/flux/stdlib/regexp/matchregexpstring.md
new file mode 100644
index 000000000..ec8c6a113
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/matchregexpstring.md
@@ -0,0 +1,53 @@
+---
+title: regexp.matchRegexpString() function
+description: >
+ The `regexp.matchRegexpString()` function tests if a string contains any match
+ to a regular expression.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/matchregexpstring/
+menu:
+ v2_0_ref:
+ name: regexp.matchRegexpString
+ parent: Regular expressions
+weight: 301
+---
+
+The `regexp.matchRegexpString()` function tests if a string contains any match
+to a regular expression.
+
+_**Output data type:** Boolean_
+
+```js
+import "regexp"
+
+regexp.matchRegexpString(r: /(gopher){2}/, v: "gophergophergopher")
+
+// Returns true
+```
+
+## Parameters
+
+### r
+The regular expression used to search `v`.
+
+_**Data type:** Regexp_
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter by columns that contain matches to a regular expression
+```js
+import "regexp"
+
+data
+ |> filter(fn: (r) =>
+ regexp.matchRegexpString(
+ r: /Alert\:/,
+ v: r.message
+ )
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/quotemeta.md b/content/v2.0/reference/flux/stdlib/regexp/quotemeta.md
new file mode 100644
index 000000000..92916a540
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/quotemeta.md
@@ -0,0 +1,46 @@
+---
+title: regexp.quoteMeta() function
+description: >
+ The `regexp.quoteMeta()` function escapes all regular expression metacharacters inside of a string.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/quotemeta/
+menu:
+ v2_0_ref:
+ name: regexp.quoteMeta
+ parent: Regular expressions
+weight: 301
+---
+
+The `regexp.quoteMeta()` function escapes all regular expression metacharacters inside of a string.
+
+_**Output data type:** String_
+
+```js
+import "regexp"
+
+regexp.quoteMeta(v: ".+*?()|[]{}^$")
+
+// Returns "\.\+\*\?\(\)\|\[\]\{\}\^\$"
+```
+
+## Parameters
+
+### v
+The string that contains regular expression metacharacters to escape.
+
+_**Data type:** String_
+
+## Examples
+
+###### Escape regular expression metacharacters in column values
+```js
+import "regexp"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ notes: r.notes,
+ notes_escaped: regexp.quoteMeta(v: r.notes)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/replaceallstring.md b/content/v2.0/reference/flux/stdlib/regexp/replaceallstring.md
new file mode 100644
index 000000000..7a84dd44c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/replaceallstring.md
@@ -0,0 +1,61 @@
+---
+title: regexp.replaceAllString() function
+description: >
+ The `regexp.replaceAllString()` function replaces all regular expression matches
+ in a string with a specified replacement.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/replaceallstring/
+menu:
+ v2_0_ref:
+ name: regexp.replaceAllString
+ parent: Regular expressions
+weight: 301
+---
+
+The `regexp.replaceAllString()` function replaces all regular expression matches
+in a string with a specified replacement.
+
+_**Output data type:** String_
+
+```js
+import "regexp"
+
+regexp.replaceAllString(r: /a(x*)b/, v: "-ab-axxb-", t: "T")
+
+// Returns "-T-T-"
+```
+
+## Parameters
+
+### r
+The regular expression used to search `v`.
+
+_**Data type:** Regexp_
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### t
+The replacement for matches to `r`.
+
+_**Data type:** String_
+
+## Examples
+
+###### Replace regular expression matches in string column values
+```js
+import "regexp"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ message: r.message,
+ updated_message: regexp.replaceAllString(
+ r: /cat|bird|ferret/,
+ v: r.message,
+ t: "dog"
+ )
+ }))
+```
diff --git a/content/v2.0/reference/flux/stdlib/regexp/splitregexp.md b/content/v2.0/reference/flux/stdlib/regexp/splitregexp.md
new file mode 100644
index 000000000..eaddf0add
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/regexp/splitregexp.md
@@ -0,0 +1,43 @@
+---
+title: regexp.splitRegexp() function
+description: >
+ The `regexp.splitRegexp()` function splits a string into substrings separated by
+ regular expression matches and returns an array of `i` substrings between matches.
+aliases:
+ - /v2.0/reference/flux/functions/regexp/splitregexp/
+menu:
+ v2_0_ref:
+ name: regexp.splitRegexp
+ parent: Regular expressions
+weight: 301
+---
+
+The `regexp.splitRegexp()` function splits a string into substrings separated by
+regular expression matches and returns an array of `i` substrings between matches.
+
+_**Output data type:** Array of Strings_
+
+```js
+import "regexp"
+
+regexp.splitRegexp(r: /a*/, v: "abaabaccadaaae", i: 5)
+
+// Returns ["", "b", "b", "c", "cadaaae"]
+```
+
+## Parameters
+
+### r
+The regular expression used to search `v`.
+
+_**Data type:** Regexp_
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### i
+The number of substrings to return.
+
+_**Data type:** Integer_
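+
+## Examples
+
+###### Split a comma-delimited string into a limited number of substrings
+The following is a minimal sketch using hypothetical tag values:
+
+```js
+import "regexp"
+
+regexp.splitRegexp(r: /,\s*/, v: "tag1, tag2,tag3", i: 3)
+
+// Returns ["tag1", "tag2", "tag3"]
+```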
diff --git a/content/v2.0/reference/flux/stdlib/runtime/_index.md b/content/v2.0/reference/flux/stdlib/runtime/_index.md
new file mode 100644
index 000000000..690559ff1
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/runtime/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux runtime package
+list_title: Runtime package
+description: >
+ The Flux runtime package includes functions that provide information about the
+ current Flux runtime. Import the `runtime` package.
+aliases:
+ - /v2.0/reference/flux/functions/runtime/
+menu:
+ v2_0_ref:
+ name: Runtime
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [runtime, functions, package]
+---
+
+The Flux runtime package includes functions that provide information about the
+current Flux runtime. Import the `runtime` package:
+
+```js
+import "runtime"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/runtime/version.md b/content/v2.0/reference/flux/stdlib/runtime/version.md
new file mode 100644
index 000000000..f7aaddf27
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/runtime/version.md
@@ -0,0 +1,22 @@
+---
+title: runtime.version() function
+description: The `runtime.version()` function returns the current Flux version.
+aliases:
+ - /v2.0/reference/flux/functions/runtime/version/
+menu:
+ v2_0_ref:
+ name: runtime.version
+ parent: Runtime
+weight: 401
+---
+
+The `runtime.version()` function returns the current Flux version.
+
+_**Function type:** Miscellaneous_
+_**Output data type:** String_
+
+```js
+import "runtime"
+
+runtime.version()
+```
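+
+## Examples
+
+###### Add the Flux version to query results
+The following is a minimal sketch that assumes you want to store the Flux
+runtime version in a hypothetical `flux_version` column on each output table:
+
+```js
+import "runtime"
+
+data
+  |> set(key: "flux_version", value: runtime.version())
+```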
diff --git a/content/v2.0/reference/flux/stdlib/secrets/_index.md b/content/v2.0/reference/flux/stdlib/secrets/_index.md
new file mode 100644
index 000000000..fb9a57a5b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/secrets/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux InfluxDB Secrets package
+list_title: InfluxDB Secrets package
+description: >
+ The Flux InfluxDB Secrets package provides functions for working with sensitive secrets managed by InfluxDB.
+ Import the `influxdata/influxdb/secrets` package.
+aliases:
+ - /v2.0/reference/flux/functions/secrets/
+menu:
+ v2_0_ref:
+ name: InfluxDB Secrets
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, secrets, package]
+---
+
+InfluxDB Secrets Flux functions provide tools for working with sensitive secrets managed by InfluxDB.
+Import the `influxdata/influxdb/secrets` package:
+
+```js
+import "influxdata/influxdb/secrets"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/secrets/get.md b/content/v2.0/reference/flux/stdlib/secrets/get.md
new file mode 100644
index 000000000..1ce2fcb2b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/secrets/get.md
@@ -0,0 +1,47 @@
+---
+title: secrets.get() function
+description: >
+ The `secrets.get()` function retrieves a secret from the InfluxDB secret store.
+aliases:
+ - /v2.0/reference/flux/functions/secrets/get/
+menu:
+ v2_0_ref:
+ name: secrets.get
+ parent: InfluxDB Secrets
+weight: 202
+---
+
+The `secrets.get()` function retrieves a secret from the
+[InfluxDB secret store](/v2.0/security/secrets/).
+
+_**Function type:** Miscellaneous_
+
+```js
+import "influxdata/influxdb/secrets"
+
+secrets.get(key: "KEY_NAME")
+```
+
+## Parameters
+
+### key
+The secret key to retrieve.
+
+_**Data type:** String_
+
+## Examples
+
+### Populate sensitive credentials with secrets
+```js
+import "sql"
+import "influxdata/influxdb/secrets"
+
+username = secrets.get(key: "POSTGRES_USERNAME")
+password = secrets.get(key: "POSTGRES_PASSWORD")
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://${username}:${password}@localhost",
+ query:"SELECT * FROM example-table"
+)
+```
diff --git a/content/v2.0/reference/flux/stdlib/slack/_index.md b/content/v2.0/reference/flux/stdlib/slack/_index.md
new file mode 100644
index 000000000..14c73c63b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/slack/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux Slack package
+list_title: Slack package
+description: >
+ The Flux Slack package provides functions for sending data to Slack.
+ Import the `slack` package.
+aliases:
+ - /v2.0/reference/flux/functions/slack/
+menu:
+ v2_0_ref:
+ name: Slack
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, slack, package]
+---
+
+The Flux Slack package provides functions for sending data to Slack.
+Import the `slack` package:
+
+```js
+import "slack"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/slack/endpoint.md b/content/v2.0/reference/flux/stdlib/slack/endpoint.md
new file mode 100644
index 000000000..22b99da8e
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/slack/endpoint.md
@@ -0,0 +1,88 @@
+---
+title: slack.endpoint() function
+description: >
+ The `slack.endpoint()` function sends a message to Slack that includes output data.
+aliases:
+ - /v2.0/reference/flux/functions/slack/endpoint/
+menu:
+ v2_0_ref:
+ name: slack.endpoint
+ parent: Slack
+weight: 202
+v2.0/tags: [endpoints]
+---
+
+The `slack.endpoint()` function sends a message to Slack that includes output data.
+
+_**Function type:** Output_
+
+```js
+import "slack"
+
+slack.endpoint(
+ url: "https://slack.com/api/chat.postMessage",
+ token: "mySuPerSecRetTokEn"
+)
+```
+
+## Parameters
+
+### url
+The Slack API URL.
+Defaults to `https://slack.com/api/chat.postMessage`.
+
+{{% note %}}
+If using a Slack webhook, you'll receive a Slack webhook URL when you
+[create an incoming webhook](https://api.slack.com/incoming-webhooks#create_a_webhook).
+{{% /note %}}
+
+_**Data type:** String_
+
+### token
+The [Slack API token](https://get.slack.help/hc/en-us/articles/215770388-Create-and-regenerate-API-tokens)
+used to interact with Slack.
+Defaults to `""`.
+
+{{% note %}}
+A token is only required if using the Slack chat.postMessage API.
+{{% /note %}}
+
+_**Data type:** String_
+
+### mapFn
+A function that builds the object used to generate the POST request.
+
+{{% note %}}
+_You should rarely need to override the default `mapFn` parameter.
+To see the default `mapFn` value or for insight into possible overrides, view the
+[`slack.endpoint()` source code](https://github.com/influxdata/flux/blob/master/stdlib/slack/slack.flux)._
+{{% /note %}}
+
+_**Data type:** Function_
+
+The returned object must include the following fields:
+
+- `username`
+- `channel`
+- `workspace`
+- `text`
+- `iconEmoji`
+- `color`
+
+_For more information, see [`slack.message()`](/v2.0/reference/flux/stdlib/slack/message/)_
+
+## Examples
+
+##### Send critical statuses to a Slack endpoint
+```js
+import "monitor"
+import "slack"
+
+endpoint = slack.endpoint(token: "mySuPerSecRetTokEn")
+
+from(bucket: "example-bucket")
+ |> range(start: -1m)
+ |> filter(fn: (r) => r._measurement == "statuses" and status == "crit")
+ |> map(fn: (r) => { return {r with status: r._status} })
+ |> monitor.notify(endpoint: endpoint)
+```
diff --git a/content/v2.0/reference/flux/stdlib/slack/message.md b/content/v2.0/reference/flux/stdlib/slack/message.md
new file mode 100644
index 000000000..28b0ddcb8
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/slack/message.md
@@ -0,0 +1,126 @@
+---
+title: slack.message() function
+description: >
+ The `slack.message()` function sends a single message to a Slack channel.
+  The function works with either the chat.postMessage API or a Slack webhook.
+aliases:
+ - /v2.0/reference/flux/functions/slack/message/
+menu:
+ v2_0_ref:
+ name: slack.message
+ parent: Slack
+weight: 202
+---
+
+The `slack.message()` function sends a single message to a Slack channel.
+The function works with either the [chat.postMessage API](https://api.slack.com/methods/chat.postMessage)
+or a [Slack webhook](https://api.slack.com/incoming-webhooks).
+
+_**Function type:** Output_
+
+```js
+import "slack"
+
+slack.message(
+ url: "https://slack.com/api/chat.postMessage",
+ token: "mySuPerSecRetTokEn",
+ username: "Fluxtastic",
+ channel: "#flux",
+ workspace: "",
+ text: "This is a message from the Flux slack.message() function.",
+ iconEmoji: "wave",
+ color: "good"
+)
+```
+
+## Parameters
+
+### url
+The Slack API URL.
+Defaults to `https://slack.com/api/chat.postMessage`.
+
+{{% note %}}
+If using a Slack webhook, you'll receive a Slack webhook URL when you
+[create an incoming webhook](https://api.slack.com/incoming-webhooks#create_a_webhook).
+{{% /note %}}
+
+_**Data type:** String_
+
+### token
+The [Slack API token](https://get.slack.help/hc/en-us/articles/215770388-Create-and-regenerate-API-tokens)
+used to interact with Slack.
+Defaults to `""`.
+
+{{% note %}}
+A token is only required if using the Slack chat.postMessage API.
+{{% /note %}}
+
+_**Data type:** String_
+
+### username
+The username to use when posting the message to a Slack channel. Required
+
+_**Data type:** String_
+
+### channel
+The name of the channel to post the message to. Required
+
+_**Data type:** String_
+
+### workspace
+The name of the Slack workspace to use if there are multiple.
+Defaults to `""`.
+
+_**Data type:** String_
+
+### text
+The text to display in the Slack message. Required
+
+_**Data type:** String_
+
+### iconEmoji
+The name of the emoji to use as the user avatar when posting the message to Slack.
+Required
+
+_**Data type:** String_
+
+{{% note %}}
+#### Things to know about iconEmoji
+- **Do not** enclose the name in colons `:` as you do in the Slack client.
+- `iconEmoji` only appears as the user avatar when using the Slack chat.postMessage API.
+{{% /note %}}
+
+### color
+The color to include with the message.
+Required
+
+**Valid values include:**
+
+- `good`
+- `warning`
+- `danger`
+- Any valid RGB hex color code. For example, `#439FE0`.
+
+_**Data type:** String_
+
+## Examples
+
+##### Send the last reported status to Slack
+```js
+import "slack"
+
+lastReported =
+ from(bucket: "example-bucket")
+ |> range(start: -1m)
+ |> filter(fn: (r) => r._measurement == "statuses")
+ |> last()
+ |> map(fn: (r) => { return {status: r._status} })
+
+slack.message(
+ url: "https://slack.com/api/chat.postMessage",
+ token: "mySuPerSecRetTokEn",
+ username: "johndoe",
+ channel: "#system-status",
+ text: "The last reported status was \"${lastReported.status}\"."
+)
+```
diff --git a/content/v2.0/reference/flux/stdlib/sql/_index.md b/content/v2.0/reference/flux/stdlib/sql/_index.md
new file mode 100644
index 000000000..14574013c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/sql/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux SQL package
+list_title: SQL package
+description: >
+ The Flux SQL package provides tools for working with data in SQL databases such as MySQL and PostgreSQL.
+ Import the `sql` package.
+aliases:
+ - /v2.0/reference/flux/functions/sql/
+menu:
+ v2_0_ref:
+ name: SQL
+ parent: Flux standard library
+weight: 202
+v2.0/tags: [functions, sql, package, mysql, postgres]
+---
+
+SQL Flux functions provide tools for working with data in SQL databases such as MySQL and PostgreSQL.
+Import the `sql` package:
+
+```js
+import "sql"
+```
+
+{{< children type="functions" show="pages" >}}
diff --git a/content/v2.0/reference/flux/stdlib/sql/from.md b/content/v2.0/reference/flux/stdlib/sql/from.md
new file mode 100644
index 000000000..1931f2fc5
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/sql/from.md
@@ -0,0 +1,81 @@
+---
+title: sql.from() function
+description: The `sql.from()` function retrieves data from a SQL data source.
+aliases:
+ - /v2.0/reference/flux/functions/sql/from/
+menu:
+ v2_0_ref:
+ name: sql.from
+ parent: SQL
+weight: 202
+---
+
+The `sql.from()` function retrieves data from a SQL data source.
+
+_**Function type:** Input_
+
+```js
+import "sql"
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://user:password@localhost",
+ query:"SELECT * FROM TestTable"
+)
+```
+
+## Parameters
+
+### driverName
+The driver used to connect to the SQL database.
+
+_**Data type:** String_
+
+The following drivers are available:
+
+- mysql
+- postgres
+
+### dataSourceName
+The connection string used to connect to the SQL database.
+The string's form and structure depend on the [driver](#drivername) used.
+
+_**Data type:** String_
+
+##### Driver dataSourceName examples
+```sh
+# Postgres Driver:
+postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full
+
+# MySQL Driver:
+username:password@tcp(localhost:3306)/dbname?param=value
+```
+
+### query
+The query to run against the SQL database.
+
+_**Data type:** String_
+
+## Examples
+
+### Query a MySQL database
+```js
+import "sql"
+
+sql.from(
+ driverName: "mysql",
+ dataSourceName: "user:password@tcp(localhost:3306)/db",
+ query:"SELECT * FROM ExampleTable"
+)
+```
+
+### Query a Postgres database
+```js
+import "sql"
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://user:password@localhost",
+ query:"SELECT * FROM ExampleTable"
+)
+```
diff --git a/content/v2.0/reference/flux/stdlib/sql/to.md b/content/v2.0/reference/flux/stdlib/sql/to.md
new file mode 100644
index 000000000..6b9e7a0b4
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/sql/to.md
@@ -0,0 +1,81 @@
+---
+title: sql.to() function
+description: The `sql.to()` function writes data to a SQL database.
+aliases:
+ - /v2.0/reference/flux/functions/sql/to/
+menu:
+ v2_0_ref:
+ name: sql.to
+ parent: SQL
+weight: 202
+---
+
+The `sql.to()` function writes data to a SQL database.
+
+_**Function type:** Output_
+
+```js
+import "sql"
+
+sql.to(
+ driverName: "mysql",
+ dataSourceName: "username:password@tcp(localhost:3306)/dbname?param=value",
+ table: "ExampleTable"
+)
+```
+
+## Parameters
+
+### driverName
+The driver used to connect to the SQL database.
+
+_**Data type:** String_
+
+The following drivers are available:
+
+- mysql
+- postgres
+
+### dataSourceName
+The connection string used to connect to the SQL database.
+The string's form and structure depend on the [driver](#drivername) used.
+
+_**Data type:** String_
+
+##### Driver dataSourceName examples
+```sh
+# Postgres Driver
+postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full
+
+# MySQL Driver
+username:password@tcp(localhost:3306)/dbname?param=value
+```
+
+### table
+The destination table.
+
+_**Data type:** String_
+
+## Examples
+
+### Write data to a MySQL database
+```js
+import "sql"
+
+sql.to(
+ driverName: "mysql",
+ dataSourceName: "user:password@tcp(localhost:3306)/db",
+ table: "ExampleTable"
+)
+```
+
+### Write data to a Postgres database
+```js
+import "sql"
+
+sql.to(
+ driverName: "postgres",
+ dataSourceName: "postgresql://user:password@localhost",
+ table: "ExampleTable"
+)
+```
diff --git a/content/v2.0/reference/flux/functions/strings/_index.md b/content/v2.0/reference/flux/stdlib/strings/_index.md
similarity index 82%
rename from content/v2.0/reference/flux/functions/strings/_index.md
rename to content/v2.0/reference/flux/stdlib/strings/_index.md
index 8a3ed526d..9cdc9225c 100644
--- a/content/v2.0/reference/flux/functions/strings/_index.md
+++ b/content/v2.0/reference/flux/stdlib/strings/_index.md
@@ -4,11 +4,13 @@ list_title: Strings package
description: >
The Flux strings package provides functions to manipulate UTF-8 encoded strings.
Import the `strings` package.
+aliases:
+ - /v2.0/reference/flux/functions/strings/
menu:
v2_0_ref:
name: Strings
- parent: Flux packages and functions
-weight: 204
+ parent: Flux standard library
+weight: 202
v2.0/tags: [strings, functions, package]
---
diff --git a/content/v2.0/reference/flux/stdlib/strings/compare.md b/content/v2.0/reference/flux/stdlib/strings/compare.md
new file mode 100644
index 000000000..2f265cf1d
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/compare.md
@@ -0,0 +1,56 @@
+---
+title: strings.compare() function
+description: The strings.compare() function compares the lexicographical order of two strings.
+aliases:
+ - /v2.0/reference/flux/functions/strings/compare/
+menu:
+ v2_0_ref:
+ name: strings.compare
+ parent: Strings
+weight: 301
+---
+
+The `strings.compare()` function compares the lexicographical order of two strings.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.compare(v: "a", t: "b")
+
+// returns -1
+```
+
+#### Return values
+| Comparison | Return value |
+|:----------:|:------------:|
+| `v < t` | `-1` |
+| `v == t` | `0` |
+| `v > t` | `1` |
+
+## Parameters
+
+### v
+The string value to compare.
+
+_**Data type:** String_
+
+### t
+The string value to compare against.
+
+_**Data type:** String_
+
+## Examples
+
+###### Compare the lexicographical order of column values
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ _value: strings.compare(v: r.tag1, t: r.tag2)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/containsany.md b/content/v2.0/reference/flux/stdlib/strings/containsany.md
new file mode 100644
index 000000000..1398d51e0
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/containsany.md
@@ -0,0 +1,54 @@
+---
+title: strings.containsAny() function
+description: >
+ The strings.containsAny() function reports whether a specified string contains
+  any characters from another string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/containsany/
+menu:
+ v2_0_ref:
+ name: strings.containsAny
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/containsstr
+---
+
+The `strings.containsAny()` function reports whether a specified string contains
+any characters from another string.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.containsAny(v: "abc", chars: "and")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### chars
+Characters to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Report if a string contains specific characters
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ _value: strings.containsAny(v: r.price, chars: "£$¢")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/containsstr.md b/content/v2.0/reference/flux/stdlib/strings/containsstr.md
new file mode 100644
index 000000000..fded2ed3a
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/containsstr.md
@@ -0,0 +1,51 @@
+---
+title: strings.containsStr() function
+description: The strings.containsStr() function reports whether a string contains a specified substring.
+aliases:
+ - /v2.0/reference/flux/functions/strings/containsstr/
+menu:
+ v2_0_ref:
+ name: strings.containsStr
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/containsany
+---
+
+The `strings.containsStr()` function reports whether a string contains a specified substring.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.containsStr(v: "This and that", substr: "and")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### substr
+The substring value to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Report if a string contains a specific substring
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ _value: strings.containsStr(v: r.author, substr: "John")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/countstr.md b/content/v2.0/reference/flux/stdlib/strings/countstr.md
new file mode 100644
index 000000000..f7b71a4a0
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/countstr.md
@@ -0,0 +1,63 @@
+---
+title: strings.countStr() function
+description: >
+ The strings.countStr() function counts the number of non-overlapping instances
+  of a substring in a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/countstr/
+menu:
+ v2_0_ref:
+ name: strings.countStr
+ parent: Strings
+weight: 301
+---
+
+The `strings.countStr()` function counts the number of non-overlapping instances
+of a substring in a string.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.countStr(v: "Hello mellow fellow", substr: "ello")
+
+// returns 3
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### substr
+The substring to count.
+
+_**Data type:** String_
+
+{{% note %}}
+The function counts only non-overlapping instances of `substr`.
+For example:
+
+```js
+strings.coutnStr(v: "ooooo", substr: "oo")
+
+// Returns 2 -- (oo)(oo)o
+```
+{{% /note %}}
+
+## Examples
+
+###### Count instances of a substring within a string
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ _value: strings.countStr(v: r.message, substr: "uh")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/equalfold.md b/content/v2.0/reference/flux/stdlib/strings/equalfold.md
new file mode 100644
index 000000000..d49d0c4cb
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/equalfold.md
@@ -0,0 +1,54 @@
+---
+title: strings.equalFold() function
+description: >
+ The strings.equalFold() function reports whether two UTF-8 strings are equal
+ under Unicode case-folding.
+aliases:
+ - /v2.0/reference/flux/functions/strings/equalfold/
+menu:
+ v2_0_ref:
+ name: strings.equalFold
+ parent: Strings
+weight: 301
+---
+
+The `strings.equalFold()` function reports whether two UTF-8 strings are equal
+under Unicode case-folding.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.equalFold(v: "Go", t: "go")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The string value to compare.
+
+_**Data type:** String_
+
+### t
+The string value to compare against.
+
+_**Data type:** String_
+
+## Examples
+
+###### Ignore case when testing if two strings are the same
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ string1: r.string1,
+ string2: r.string2,
+ same: strings.equalFold(v: r.string1, t: r.string2)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/hasprefix.md b/content/v2.0/reference/flux/stdlib/strings/hasprefix.md
new file mode 100644
index 000000000..200aca3cf
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/hasprefix.md
@@ -0,0 +1,45 @@
+---
+title: strings.hasPrefix() function
+description: The strings.hasPrefix() function indicates if a string begins with a specific prefix.
+aliases:
+ - /v2.0/reference/flux/functions/strings/hasprefix/
+menu:
+ v2_0_ref:
+ name: strings.hasPrefix
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/hassuffix
+---
+
+The `strings.hasPrefix()` function indicates if a string begins with a specified prefix.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.hasPrefix(v: "go gopher", t: "go")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### t
+The prefix to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter based on the presence of a prefix in a column value
+```js
+import "strings"
+
+data
+ |> filter(fn:(r) => strings.hasPrefix(v: r.metric, t: "int_" ))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/hassuffix.md b/content/v2.0/reference/flux/stdlib/strings/hassuffix.md
new file mode 100644
index 000000000..dfc66563a
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/hassuffix.md
@@ -0,0 +1,45 @@
+---
+title: strings.hasSuffix() function
+description: The strings.hasSuffix() function indicates if a string ends with a specified suffix.
+aliases:
+ - /v2.0/reference/flux/functions/strings/hassuffix/
+menu:
+ v2_0_ref:
+ name: strings.hasSuffix
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/hasprefix
+---
+
+The `strings.hasSuffix()` function indicates if a string ends with a specified suffix.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.hasSuffix(v: "go gopher", t: "go")
+
+// returns false
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### t
+The suffix to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter based on the presence of a suffix in a column value
+```js
+import "strings"
+
+data
+ |> filter(fn:(r) => strings.hasSuffix(v: r.metric, t: "_count" ))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/index-func.md b/content/v2.0/reference/flux/stdlib/strings/index-func.md
new file mode 100644
index 000000000..7e7119871
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/index-func.md
@@ -0,0 +1,56 @@
+---
+title: strings.index() function
+description: >
+ The strings.index() function returns the index of the first instance of a substring
+ in another string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/index-func/
+menu:
+ v2_0_ref:
+ name: strings.index
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/indexany/
+ - /v2.0/reference/flux/stdlib/strings/lastindex/
+ - /v2.0/reference/flux/stdlib/strings/lastindexany/
+---
+
+The `strings.index()` function returns the index of the first instance of a substring
+in a string. If the substring is not present, it returns `-1`.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.index(v: "go gopher", substr: "go")
+
+// returns 0
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### substr
+The substring to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Find the first occurrence of a substring
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ the_index: strings.index(v: r.pageTitle, substr: "the")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/indexany.md b/content/v2.0/reference/flux/stdlib/strings/indexany.md
new file mode 100644
index 000000000..e72b922fa
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/indexany.md
@@ -0,0 +1,55 @@
+---
+title: strings.indexAny() function
+description: >
+ The strings.indexAny() function returns the index of the first instance of specified characters in a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/indexany/
+menu:
+ v2_0_ref:
+ name: strings.indexAny
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/index-func/
+ - /v2.0/reference/flux/stdlib/strings/lastindex/
+ - /v2.0/reference/flux/stdlib/strings/lastindexany/
+---
+
+The `strings.indexAny()` function returns the index of the first instance of specified characters in a string.
+If none of the specified characters are present, it returns `-1`.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.indexAny(v: "chicken", chars: "aeiouy")
+
+// returns 2
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### chars
+Characters to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Find the first occurrence of characters from a string
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ charIndex: strings.indexAny(v: r._field, chars: "_-")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/isdigit.md b/content/v2.0/reference/flux/stdlib/strings/isdigit.md
new file mode 100644
index 000000000..1fb0b4c63
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/isdigit.md
@@ -0,0 +1,42 @@
+---
+title: strings.isDigit() function
+description: The strings.isDigit() function tests if a single character string is a digit (0-9).
+aliases:
+ - /v2.0/reference/flux/functions/strings/isdigit/
+menu:
+ v2_0_ref:
+ name: strings.isDigit
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/isletter/
+---
+
+The `strings.isDigit()` function tests if a single-character string is a digit (0-9).
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.isDigit(v: "A")
+
+// returns false
+```
+
+## Parameters
+
+### v
+The single-character string to test.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter by columns with digits as values
+```js
+import "strings"
+
+data
+ |> filter(fn: (r) => strings.isDigit(v: r.serverRef))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/isletter.md b/content/v2.0/reference/flux/stdlib/strings/isletter.md
new file mode 100644
index 000000000..1ba665e0c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/isletter.md
@@ -0,0 +1,42 @@
+---
+title: strings.isLetter() function
+description: The strings.isLetter() function tests if a single character string is a letter (a-z, A-Z).
+aliases:
+ - /v2.0/reference/flux/functions/strings/isletter/
+menu:
+ v2_0_ref:
+ name: strings.isLetter
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/isdigit/
+---
+
+The `strings.isLetter()` function tests if a single character string is a letter (a-z, A-Z).
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.isLetter(v: "A")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The single character string to test.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter by columns with single-letter values
+```js
+import "strings"
+
+data
+ |> filter(fn: (r) => strings.isLetter(v: r.serverRef))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/islower.md b/content/v2.0/reference/flux/stdlib/strings/islower.md
new file mode 100644
index 000000000..1f06c7ff8
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/islower.md
@@ -0,0 +1,42 @@
+---
+title: strings.isLower() function
+description: The strings.isLower() function tests if a single-character string is lowercase.
+aliases:
+ - /v2.0/reference/flux/functions/strings/islower/
+menu:
+ v2_0_ref:
+ name: strings.isLower
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/isupper
+---
+
+The `strings.isLower()` function tests if a single-character string is lowercase.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.isLower(v: "a")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The single-character string value to test.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter by columns with single-letter lowercase values
+```js
+import "strings"
+
+data
+ |> filter(fn: (r) => strings.isLower(v: r.host))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/isupper.md b/content/v2.0/reference/flux/stdlib/strings/isupper.md
new file mode 100644
index 000000000..389faecf9
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/isupper.md
@@ -0,0 +1,42 @@
+---
+title: strings.isUpper() function
+description: The strings.isUpper() function tests if a single character string is uppercase.
+aliases:
+ - /v2.0/reference/flux/functions/strings/isupper/
+menu:
+ v2_0_ref:
+ name: strings.isUpper
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/islower
+---
+
+The `strings.isUpper()` function tests if a single character string is uppercase.
+
+_**Output data type:** Boolean_
+
+```js
+import "strings"
+
+strings.isUpper(v: "A")
+
+// returns true
+```
+
+## Parameters
+
+### v
+The single-character string value to test.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter by columns with single-letter uppercase values
+```js
+import "strings"
+
+data
+ |> filter(fn: (r) => strings.isUpper(v: r.host))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/joinstr.md b/content/v2.0/reference/flux/stdlib/strings/joinstr.md
new file mode 100644
index 000000000..19708d29b
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/joinstr.md
@@ -0,0 +1,49 @@
+---
+title: strings.joinStr() function
+description: >
+ The strings.joinStr() function concatenates the elements of a string array into
+ a single string using a specified separator.
+aliases:
+ - /v2.0/reference/flux/functions/strings/joinstr/
+menu:
+ v2_0_ref:
+ name: strings.joinStr
+ parent: Strings
+weight: 301
+---
+
+The `strings.joinStr()` function concatenates elements of a string array into
+a single string using a specified separator.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.joinStr(arr: ["a", "b", "c"], v: ",")
+
+// returns "a,b,c"
+```
+
+## Parameters
+
+### arr
+The array of strings to concatenate.
+
+_**Data type:** Array of strings_
+
+### v
+The separator to use in the concatenated value.
+
+_**Data type:** String_
+
+## Examples
+
+###### Join a list of strings into a single string
+```js
+import "strings"
+
+searchTags = ["tag1", "tag2", "tag3"]
+
+strings.joinStr(arr: searchTags, v: ","))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/lastindex.md b/content/v2.0/reference/flux/stdlib/strings/lastindex.md
new file mode 100644
index 000000000..de47b378c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/lastindex.md
@@ -0,0 +1,56 @@
+---
+title: strings.lastIndex() function
+description: >
+ The strings.lastIndex() function returns the index of the last instance of a substring
+ in a string or `-1` if substring is not present.
+aliases:
+ - /v2.0/reference/flux/functions/strings/lastindex/
+menu:
+ v2_0_ref:
+ name: strings.lastIndex
+ parent: Strings
+weight: 301
+related:
+  - /v2.0/reference/flux/stdlib/strings/index-func/
+ - /v2.0/reference/flux/stdlib/strings/indexany/
+ - /v2.0/reference/flux/stdlib/strings/lastindexany/
+---
+
+The `strings.lastIndex()` function returns the index of the last instance of a substring
+in a string. If the substring is not present, the function returns `-1`.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.lastIndex(v: "go gopher", t: "go")
+
+// returns 3
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### substr
+The substring to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Find the last occurrence of a substring
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ the_index: strings.lastIndex(v: r.pageTitle, substr: "the")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/lastindexany.md b/content/v2.0/reference/flux/stdlib/strings/lastindexany.md
new file mode 100644
index 000000000..3f93e97a6
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/lastindexany.md
@@ -0,0 +1,54 @@
+---
+title: strings.lastIndexAny() function
+description: The `strings.lastIndexAny()` function returns the index of the last instance of any specified characters in a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/lastindexany/
+menu:
+ v2_0_ref:
+ name: strings.lastIndexAny
+ parent: Strings
+weight: 301
+related:
+  - /v2.0/reference/flux/stdlib/strings/index-func/
+ - /v2.0/reference/flux/stdlib/strings/indexany/
+ - /v2.0/reference/flux/stdlib/strings/lastindex/
+---
+
+The `strings.lastIndexAny()` function returns the index of the last instance of any specified characters in a string.
+If none of the specified characters are present, the function returns `-1`.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.lastIndexAny(v: "chicken", chars: "aeiouy")
+
+// returns 5
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### chars
+Characters to search for.
+
+_**Data type:** String_
+
+## Examples
+
+###### Find the last occurrence of characters from a string
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ charLastIndex: strings.lastIndexAny(v: r._field, chars: "_-")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/repeat.md b/content/v2.0/reference/flux/stdlib/strings/repeat.md
new file mode 100644
index 000000000..07084055c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/repeat.md
@@ -0,0 +1,50 @@
+---
+title: strings.repeat() function
+description: The strings.repeat() function returns a string consisting of `i` copies of a specified string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/repeat/
+menu:
+ v2_0_ref:
+ name: strings.repeat
+ parent: Strings
+weight: 301
+---
+
+The `strings.repeat()` function returns a string consisting of `i` copies of a specified string.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.repeat(v: "ha", i: 3)
+
+// returns "hahaha"
+```
+
+## Parameters
+
+### v
+The string value to repeat.
+
+_**Data type:** String_
+
+### i
+The number of times to repeat `v`.
+
+_**Data type:** Integer_
+
+## Examples
+
+###### Repeat a string based on existing columns
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+      laugh: r.laugh,
+      intensity: r.intensity,
+ laughter: strings.repeat(v: r.laugh, i: r.intensity)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/replace.md b/content/v2.0/reference/flux/stdlib/strings/replace.md
new file mode 100644
index 000000000..caa1bcbd1
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/replace.md
@@ -0,0 +1,64 @@
+---
+title: strings.replace() function
+description: >
+ The strings.replace() function replaces the first `i` non-overlapping instances
+ of a substring with a specified replacement.
+aliases:
+ - /v2.0/reference/flux/functions/strings/replace/
+menu:
+ v2_0_ref:
+ name: strings.replace
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/replaceall
+---
+
+The `strings.replace()` function replaces the first `i` non-overlapping instances
+of a substring with a specified replacement.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.replace(v: "oink oink oink", t: "oink", u: "moo", i: 2)
+
+// returns "moo moo oink"
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### t
+The substring value to replace.
+
+_**Data type:** String_
+
+### u
+The replacement for `i` instances of `t`.
+
+_**Data type:** String_
+
+### i
+The number of non-overlapping `t` matches to replace.
+
+_**Data type:** Integer_
+
+## Examples
+
+###### Replace a specific number of string matches
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ content: strings.replace(v: r.content, t: "he", u: "her", i: 3)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/replaceall.md b/content/v2.0/reference/flux/stdlib/strings/replaceall.md
new file mode 100644
index 000000000..e129c6405
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/replaceall.md
@@ -0,0 +1,59 @@
+---
+title: strings.replaceAll() function
+description: >
+ The strings.replaceAll() function replaces all non-overlapping instances of a
+ substring with a specified replacement.
+aliases:
+ - /v2.0/reference/flux/functions/strings/replaceall/
+menu:
+ v2_0_ref:
+ name: strings.replaceAll
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/replace
+---
+
+The `strings.replaceAll()` function replaces all non-overlapping instances of a
+substring with a specified replacement.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.replaceAll(v: "oink oink oink", t: "oink", u: "moo")
+
+// returns "moo moo moo"
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### t
+The substring to replace.
+
+_**Data type:** String_
+
+### u
+The replacement for all instances of `t`.
+
+_**Data type:** String_
+
+## Examples
+
+###### Replace string matches
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ content: strings.replaceAll(v: r.content, t: "he", u: "her")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/split.md b/content/v2.0/reference/flux/stdlib/strings/split.md
new file mode 100644
index 000000000..aeea22df0
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/split.md
@@ -0,0 +1,52 @@
+---
+title: strings.split() function
+description: >
+ The strings.split() function splits a string on a specified separator and returns
+ an array of substrings.
+aliases:
+ - /v2.0/reference/flux/functions/strings/split/
+menu:
+ v2_0_ref:
+ name: strings.split
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/splitafter
+ - /v2.0/reference/flux/stdlib/strings/splitaftern
+ - /v2.0/reference/flux/stdlib/strings/splitn
+---
+
+The `strings.split()` function splits a string on a specified separator and returns
+an array of substrings.
+
+_**Output data type:** Array of strings_
+
+```js
+import "strings"
+
+strings.split(v: "a flux of foxes", t: " ")
+
+// returns ["a", "flux", "of", "foxes"]
+```
+
+## Parameters
+
+### v
+The string value to split.
+
+_**Data type:** String_
+
+### t
+The string value that acts as the separator.
+
+_**Data type:** String_
+
+## Examples
+
+###### Split a string into an array of substrings
+```js
+import "strings"
+
+data
+  |> map(fn: (r) => strings.split(v: r.searchTags, t: ","))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/splitafter.md b/content/v2.0/reference/flux/stdlib/strings/splitafter.md
new file mode 100644
index 000000000..65e0542c7
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/splitafter.md
@@ -0,0 +1,52 @@
+---
+title: strings.splitAfter() function
+description: >
+ The strings.splitAfter() function splits a string after a specified separator and returns
+ an array of substrings.
+aliases:
+ - /v2.0/reference/flux/functions/strings/splitafter/
+menu:
+ v2_0_ref:
+ name: strings.splitAfter
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/split
+ - /v2.0/reference/flux/stdlib/strings/splitaftern
+ - /v2.0/reference/flux/stdlib/strings/splitn
+---
+
+The `strings.splitAfter()` function splits a string after a specified separator and returns
+an array of substrings.
+
+_**Output data type:** Array of strings_
+
+```js
+import "strings"
+
+strings.splitAfter(v: "a flux of foxes", t: " ")
+
+// returns ["a ", "flux ", "of ", "foxes"]
+```
+
+## Parameters
+
+### v
+The string value to split.
+
+_**Data type:** String_
+
+### t
+The string value that acts as the separator.
+
+_**Data type:** String_
+
+## Examples
+
+###### Split a string into an array of substrings
+```js
+import "strings"
+
+data
+  |> map(fn: (r) => strings.splitAfter(v: r.searchTags, t: ","))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/splitaftern.md b/content/v2.0/reference/flux/stdlib/strings/splitaftern.md
new file mode 100644
index 000000000..f37feef5c
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/splitaftern.md
@@ -0,0 +1,57 @@
+---
+title: strings.splitAfterN() function
+description: >
+ The strings.splitAfterN() function splits a string after a specified separator and returns
+ an array of `i` substrings.
+aliases:
+ - /v2.0/reference/flux/functions/strings/splitaftern/
+menu:
+ v2_0_ref:
+ name: strings.splitAfterN
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/split
+ - /v2.0/reference/flux/stdlib/strings/splitafter
+ - /v2.0/reference/flux/stdlib/strings/splitn
+---
+
+The `strings.splitAfterN()` function splits a string after a specified separator and returns
+an array of `i` substrings.
+
+_**Output data type:** Array of strings_
+
+```js
+import "strings"
+
+strings.splitAfterN(v: "a flux of foxes", t: " ", i: 2)
+
+// returns ["a ", "flux ", "of foxes"]
+```
+
+## Parameters
+
+### v
+The string value to split.
+
+_**Data type:** String_
+
+### t
+The string value that acts as the separator.
+
+_**Data type:** String_
+
+### i
+The number of substrings to return.
+
+_**Data type:** Integer_
+
+## Examples
+
+###### Split a string into an array of substrings
+```js
+import "strings"
+
+data
+  |> map(fn: (r) => strings.splitAfterN(v: r.searchTags, t: ",", i: 3))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/splitn.md b/content/v2.0/reference/flux/stdlib/strings/splitn.md
new file mode 100644
index 000000000..c3481beb6
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/splitn.md
@@ -0,0 +1,57 @@
+---
+title: strings.splitN() function
+description: >
+ The strings.splitN() function splits a string on a specified separator and returns
+ an array of `i` substrings.
+aliases:
+ - /v2.0/reference/flux/functions/strings/splitn/
+menu:
+ v2_0_ref:
+ name: strings.splitN
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/split
+ - /v2.0/reference/flux/stdlib/strings/splitafter
+ - /v2.0/reference/flux/stdlib/strings/splitaftern
+---
+
+The `strings.splitN()` function splits a string on a specified separator and returns
+an array of `i` substrings.
+
+_**Output data type:** Array of strings_
+
+```js
+import "strings"
+
+strings.splitN(v: "a flux of foxes", t: " ", i: 2)
+
+// returns ["a", "flux", "of foxes"]
+```
+
+## Parameters
+
+### v
+The string value to split.
+
+_**Data type:** String_
+
+### t
+The string value that acts as the separator.
+
+_**Data type:** String_
+
+### i
+The number of substrings to return.
+
+_**Data type:** Integer_
+
+## Examples
+
+###### Split a string into an array of substrings
+```js
+import "strings"
+
+data
+  |> map(fn: (r) => strings.splitN(v: r.searchTags, t: ",", i: 3))
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/strlen.md b/content/v2.0/reference/flux/stdlib/strings/strlen.md
new file mode 100644
index 000000000..0e92561c9
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/strlen.md
@@ -0,0 +1,55 @@
+---
+title: strings.strlen() function
+description: >
+ The strings.strlen() function returns the length of a string.
+ String length is determined by the number of UTF code points a string contains.
+aliases:
+ - /v2.0/reference/flux/functions/strings/strlen/
+menu:
+ v2_0_ref:
+ name: strings.strlen
+ parent: Strings
+weight: 301
+---
+
+The `strings.strlen()` function returns the length of a string.
+String length is determined by the number of UTF code points a string contains.
+
+_**Output data type:** Integer_
+
+```js
+import "strings"
+
+strings.strlen(v: "apple")
+
+// returns 5
+```
+
+## Parameters
+
+### v
+The string value to measure.
+
+_**Data type:** String_
+
+## Examples
+
+###### Filter based on string value length
+```js
+import "strings"
+
+data
+ |> filter(fn: (r) => strings.strlen(v: r._measurement) <= 4)
+```
+
+###### Store the length of string values
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ length: strings.strlen(v: r._value)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/stdlib/strings/substring.md b/content/v2.0/reference/flux/stdlib/strings/substring.md
new file mode 100644
index 000000000..64cc621fd
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/substring.md
@@ -0,0 +1,57 @@
+---
+title: strings.substring() function
+description: >
+ The strings.substring() function returns a substring based on `start` and `end` parameters.
+ Indices are based on UTF code points.
+aliases:
+ - /v2.0/reference/flux/functions/strings/substring/
+menu:
+ v2_0_ref:
+ name: strings.substring
+ parent: Strings
+weight: 301
+---
+
+The `strings.substring()` function returns a substring based on `start` and `end` parameters.
+These parameters represent indices of UTF code points in the string.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.substring(v: "influx", start: 0, end: 4)
+
+// returns "infl"
+```
+
+## Parameters
+
+### v
+The string value to search.
+
+_**Data type:** String_
+
+### start
+The starting index of the substring.
+
+_**Data type:** Integer_
+
+### end
+The ending index of the substring.
+
+_**Data type:** Integer_
+
+## Examples
+
+###### Store the first four characters of a string
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ abbr: strings.substring(v: r.name, start: 0, end: 4)
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/strings/title.md b/content/v2.0/reference/flux/stdlib/strings/title.md
similarity index 64%
rename from content/v2.0/reference/flux/functions/strings/title.md
rename to content/v2.0/reference/flux/stdlib/strings/title.md
index bfd670dea..53357507b 100644
--- a/content/v2.0/reference/flux/functions/strings/title.md
+++ b/content/v2.0/reference/flux/stdlib/strings/title.md
@@ -1,11 +1,17 @@
---
title: strings.title() function
description: The strings.title() function converts a string to title case.
+aliases:
+ - /v2.0/reference/flux/functions/strings/title/
menu:
v2_0_ref:
name: strings.title
parent: Strings
weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/tolower
+ - /v2.0/reference/flux/stdlib/strings/totitle
+ - /v2.0/reference/flux/stdlib/strings/toupper
---
The `strings.title()` function converts a string to title case.
@@ -20,7 +26,7 @@ strings.title(v: "a flux of foxes")
// returns "A Flux Of Foxes"
```
-## Paramters
+## Parameters
### v
The string value to convert.
@@ -34,5 +40,5 @@ _**Data type:** String_
import "strings"
data
- |> map(fn:(r) => strings.title(v: r.pageTitle))
+ |> map(fn: (r) => ({ r with pageTitle: strings.title(v: r.pageTitle) }))
```
diff --git a/content/v2.0/reference/flux/functions/strings/tolower.md b/content/v2.0/reference/flux/stdlib/strings/tolower.md
similarity index 53%
rename from content/v2.0/reference/flux/functions/strings/tolower.md
rename to content/v2.0/reference/flux/stdlib/strings/tolower.md
index 8f64fb56c..1a58ccdc8 100644
--- a/content/v2.0/reference/flux/functions/strings/tolower.md
+++ b/content/v2.0/reference/flux/stdlib/strings/tolower.md
@@ -1,14 +1,20 @@
---
title: strings.toLower() function
-description: The strings.toLower() function converts a string to lower case.
+description: The strings.toLower() function converts a string to lowercase.
+aliases:
+ - /v2.0/reference/flux/functions/strings/tolower/
menu:
v2_0_ref:
name: strings.toLower
parent: Strings
weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/totitle
+ - /v2.0/reference/flux/stdlib/strings/toupper
+ - /v2.0/reference/flux/stdlib/strings/title
---
-The `strings.toLower()` function converts a string to lower case.
+The `strings.toLower()` function converts a string to lowercase.
_**Output data type:** String_
@@ -20,7 +26,7 @@ strings.toLower(v: "KOALA")
// returns "koala"
```
-## Paramters
+## Parameters
### v
The string value to convert.
@@ -34,5 +40,8 @@ _**Data type:** String_
import "strings"
data
- |> map(fn:(r) => strings.toLower(v: r.exclamation))
+ |> map(fn: (r) => ({
+ r with exclamation: strings.toLower(v: r.exclamation)
+ })
+ )
```
diff --git a/content/v2.0/reference/flux/stdlib/strings/totitle.md b/content/v2.0/reference/flux/stdlib/strings/totitle.md
new file mode 100644
index 000000000..733219824
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/totitle.md
@@ -0,0 +1,57 @@
+---
+title: strings.toTitle() function
+description: The strings.toTitle() function converts all characters in a string to title case.
+aliases:
+ - /v2.0/reference/flux/functions/strings/totitle/
+menu:
+ v2_0_ref:
+ name: strings.toTitle
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/toupper
+ - /v2.0/reference/flux/stdlib/strings/tolower
+ - /v2.0/reference/flux/stdlib/strings/title
+---
+
+The `strings.toTitle()` function converts all characters in a string to title case.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.toTitle(v: "a flux of foxes")
+
+// returns "A FLUX OF FOXES"
+```
+
+## Parameters
+
+### v
+The string value to convert.
+
+_**Data type:** String_
+
+## Examples
+
+###### Convert characters in a string to title case
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({ r with pageTitle: strings.toTitle(v: r.pageTitle) }))
+```
+
+{{% note %}}
+#### The difference between toTitle and toUpper
+The results of `toTitle()` and `toUpper()` are often the same. However, the difference
+is visible with special characters:
+
+```js
+str = "dz"
+
+strings.toTitle(v: str) // Returns Dz
+strings.toUpper(v: str) // Returns DZ
+```
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/stdlib/strings/toupper.md b/content/v2.0/reference/flux/stdlib/strings/toupper.md
new file mode 100644
index 000000000..948f520a1
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/toupper.md
@@ -0,0 +1,57 @@
+---
+title: strings.toUpper() function
+description: The strings.toUpper() function converts a string to uppercase.
+aliases:
+ - /v2.0/reference/flux/functions/strings/toupper/
+menu:
+ v2_0_ref:
+ name: strings.toUpper
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/totitle
+ - /v2.0/reference/flux/stdlib/strings/tolower
+ - /v2.0/reference/flux/stdlib/strings/title
+---
+
+The `strings.toUpper()` function converts a string to uppercase.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.toUpper(v: "koala")
+
+// returns "KOALA"
+```
+
+## Parameters
+
+### v
+The string value to convert.
+
+_**Data type:** String_
+
+## Examples
+
+###### Convert all values of a column to upper case
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({ r with envVars: strings.toUpper(v: r.envVars) }))
+```
+
+{{% note %}}
+#### The difference between toTitle and toUpper
+The results of `toUpper()` and `toTitle()` are often the same. However, the difference
+is visible when using special characters:
+
+```js
+str = "dz"
+
+strings.toUpper(v: str) // Returns DZ
+strings.toTitle(v: str) // Returns Dz
+```
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/functions/strings/trim.md b/content/v2.0/reference/flux/stdlib/strings/trim.md
similarity index 57%
rename from content/v2.0/reference/flux/functions/strings/trim.md
rename to content/v2.0/reference/flux/stdlib/strings/trim.md
index da1cb069b..c9ba4fc0c 100644
--- a/content/v2.0/reference/flux/functions/strings/trim.md
+++ b/content/v2.0/reference/flux/stdlib/strings/trim.md
@@ -3,11 +3,19 @@ title: strings.trim() function
description: >
The strings.trim() function removes leading and trailing characters specified
in the cutset from a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/trim/
menu:
v2_0_ref:
name: strings.trim
parent: Strings
weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/trimleft
+ - /v2.0/reference/flux/stdlib/strings/trimright
+ - /v2.0/reference/flux/stdlib/strings/trimprefix
+ - /v2.0/reference/flux/stdlib/strings/trimsuffix
+ - /v2.0/reference/flux/stdlib/strings/trimspace
---
The `strings.trim()` function removes leading and trailing characters specified
@@ -23,15 +31,15 @@ strings.trim(v: ".abc.", cutset: ".")
// returns "abc"
```
-## Paramters
+## Parameters
### v
-The string value from which to trim characters.
+String to remove characters from.
_**Data type:** String_
### cutset
-The leading and trailing characters to trim from the string value.
+The leading and trailing characters to remove from the string.
Only characters that match the `cutset` string exactly are trimmed.
_**Data type:** String_
@@ -43,5 +51,9 @@ _**Data type:** String_
import "strings"
data
- |> map(fn:(r) => strings.trim(v: r.variables, cutset: "."))
+ |> map(fn: (r) => ({
+ r with
+ variables: strings.trim(v: r.variables, cutset: ".")
+ })
+ )
```
diff --git a/content/v2.0/reference/flux/stdlib/strings/trimleft.md b/content/v2.0/reference/flux/stdlib/strings/trimleft.md
new file mode 100644
index 000000000..f4cf088bc
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/trimleft.md
@@ -0,0 +1,57 @@
+---
+title: strings.trimLeft() function
+description: >
+ The strings.trimLeft() function removes specified leading characters from a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/trimleft/
+menu:
+ v2_0_ref:
+ name: strings.trimLeft
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/trim
+ - /v2.0/reference/flux/stdlib/strings/trimright
+ - /v2.0/reference/flux/stdlib/strings/trimprefix
+ - /v2.0/reference/flux/stdlib/strings/trimsuffix
+ - /v2.0/reference/flux/stdlib/strings/trimspace
+---
+
+The `strings.trimLeft()` function removes specified leading characters from a string.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.trimLeft(v: ".abc.", cutset: ".")
+
+// returns "abc."
+```
+
+## Parameters
+
+### v
+String to remove characters from.
+
+_**Data type:** String_
+
+### cutset
+The leading characters to remove from the string.
+Only characters that match the `cutset` string exactly are removed.
+
+_**Data type:** String_
+
+## Examples
+
+###### Trim leading periods from all values in a column
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ variables: strings.trimLeft(v: r.variables, cutset: ".")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/strings/trimprefix.md b/content/v2.0/reference/flux/stdlib/strings/trimprefix.md
similarity index 62%
rename from content/v2.0/reference/flux/functions/strings/trimprefix.md
rename to content/v2.0/reference/flux/stdlib/strings/trimprefix.md
index 25bcc0d53..441b64823 100644
--- a/content/v2.0/reference/flux/functions/strings/trimprefix.md
+++ b/content/v2.0/reference/flux/stdlib/strings/trimprefix.md
@@ -3,11 +3,19 @@ title: strings.trimPrefix() function
description: >
The `strings.trimPrefix()` function removes a prefix from a string.
Strings that do not start with the prefix are returned unchanged.
+aliases:
+ - /v2.0/reference/flux/functions/strings/trimprefix/
menu:
v2_0_ref:
name: strings.trimPrefix
parent: Strings
weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/trim
+ - /v2.0/reference/flux/stdlib/strings/trimleft
+ - /v2.0/reference/flux/stdlib/strings/trimright
+ - /v2.0/reference/flux/stdlib/strings/trimsuffix
+ - /v2.0/reference/flux/stdlib/strings/trimspace
---
The `strings.trimPrefix()` function removes a prefix from a string.
@@ -23,7 +31,7 @@ strings.trimPrefix(v: "123_abc", prefix: "123")
// returns "_abc"
```
-## Paramters
+## Parameters
### v
The string value to trim.
@@ -42,5 +50,9 @@ _**Data type:** String_
import "strings"
data
- |> map(fn:(r) => strings.trimPrefix(v: r.sensorId, prefix: "s12_"))
+ |> map(fn: (r) => ({
+ r with
+ sensorID: strings.trimPrefix(v: r.sensorId, prefix: "s12_")
+ })
+ )
```
diff --git a/content/v2.0/reference/flux/stdlib/strings/trimright.md b/content/v2.0/reference/flux/stdlib/strings/trimright.md
new file mode 100644
index 000000000..35419498d
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/strings/trimright.md
@@ -0,0 +1,58 @@
+---
+title: strings.trimRight() function
+description: >
+ The strings.trimRight() function removes trailing characters specified in the cutset from a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/trimright/
+menu:
+ v2_0_ref:
+ name: strings.trimRight
+ parent: Strings
+weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/trim
+ - /v2.0/reference/flux/stdlib/strings/trimleft
+ - /v2.0/reference/flux/stdlib/strings/trimprefix
+ - /v2.0/reference/flux/stdlib/strings/trimsuffix
+ - /v2.0/reference/flux/stdlib/strings/trimspace
+---
+
+The `strings.trimRight()` function removes trailing characters specified in the
+[`cutset`](#cutset) from a string.
+
+_**Output data type:** String_
+
+```js
+import "strings"
+
+strings.trimRight(v: ".abc.", cutset: ".")
+
+// returns "abc."
+```
+
+## Parameters
+
+### v
+String to remove characters from.
+
+_**Data type:** String_
+
+### cutset
+The trailing characters to trim from the string.
+Only characters that match the `cutset` string exactly are trimmed.
+
+_**Data type:** String_
+
+## Examples
+
+###### Trim trailing periods from all values in a column
+```js
+import "strings"
+
+data
+ |> map(fn: (r) => ({
+ r with
+ variables: strings.trimRight(v: r.variables, cutset: ".")
+ })
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/strings/trimspace.md b/content/v2.0/reference/flux/stdlib/strings/trimspace.md
similarity index 56%
rename from content/v2.0/reference/flux/functions/strings/trimspace.md
rename to content/v2.0/reference/flux/stdlib/strings/trimspace.md
index 4c222a01b..0645b8b71 100644
--- a/content/v2.0/reference/flux/functions/strings/trimspace.md
+++ b/content/v2.0/reference/flux/stdlib/strings/trimspace.md
@@ -1,11 +1,19 @@
---
title: strings.trimSpace() function
description: The strings.trimSpace() function removes leading and trailing spaces from a string.
+aliases:
+ - /v2.0/reference/flux/functions/strings/trimspace/
menu:
v2_0_ref:
name: strings.trimSpace
parent: Strings
weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/trim
+ - /v2.0/reference/flux/stdlib/strings/trimleft
+ - /v2.0/reference/flux/stdlib/strings/trimright
+ - /v2.0/reference/flux/stdlib/strings/trimprefix
+ - /v2.0/reference/flux/stdlib/strings/trimsuffix
---
The `strings.trimSpace()` function removes leading and trailing spaces from a string.
@@ -20,10 +28,10 @@ strings.trimSpace(v: " abc ")
// returns "abc"
```
-## Paramters
+## Parameters
### v
-The string value from which to trim spaces.
+String to remove spaces from.
_**Data type:** String_
@@ -34,5 +42,5 @@ _**Data type:** String_
import "strings"
data
- |> map(fn:(r) => strings.trimSpace(v: r.userInput))
+ |> map(fn: (r) => ({ r with userInput: strings.trimSpace(v: r.userInput) }))
```
diff --git a/content/v2.0/reference/flux/functions/strings/trimsuffix.md b/content/v2.0/reference/flux/stdlib/strings/trimsuffix.md
similarity index 62%
rename from content/v2.0/reference/flux/functions/strings/trimsuffix.md
rename to content/v2.0/reference/flux/stdlib/strings/trimsuffix.md
index d69a018cf..7d6d12f79 100644
--- a/content/v2.0/reference/flux/functions/strings/trimsuffix.md
+++ b/content/v2.0/reference/flux/stdlib/strings/trimsuffix.md
@@ -3,11 +3,19 @@ title: strings.trimSuffix() function
description: >
The `strings.trimSuffix()` function removes a suffix from a string.
Strings that do not end with the suffix are returned unchanged.
+aliases:
+ - /v2.0/reference/flux/functions/strings/trimsuffix/
menu:
v2_0_ref:
name: strings.trimSuffix
parent: Strings
weight: 301
+related:
+ - /v2.0/reference/flux/stdlib/strings/trim
+ - /v2.0/reference/flux/stdlib/strings/trimleft
+ - /v2.0/reference/flux/stdlib/strings/trimright
+ - /v2.0/reference/flux/stdlib/strings/trimprefix
+ - /v2.0/reference/flux/stdlib/strings/trimspace
---
The `strings.trimSuffix()` function removes a suffix from a string.
@@ -23,7 +31,7 @@ strings.trimSuffix(v: "123_abc", suffix: "abc")
// returns "123_"
```
-## Paramters
+## Parameters
### v
The string value to trim.
@@ -42,5 +50,9 @@ _**Data type:** String_
import "strings"
data
- |> map(fn:(r) => strings.trimSuffix(v: r.sensorId, suffix: "_s12"))
+ |> map(fn: (r) => ({
+ r with
+ sensorID: strings.trimSuffix(v: r.sensorId, suffix: "_s12")
+ })
+ )
```
diff --git a/content/v2.0/reference/flux/functions/system/_index.md b/content/v2.0/reference/flux/stdlib/system/_index.md
similarity index 82%
rename from content/v2.0/reference/flux/functions/system/_index.md
rename to content/v2.0/reference/flux/stdlib/system/_index.md
index 86db2f881..8bd1d6c9f 100644
--- a/content/v2.0/reference/flux/functions/system/_index.md
+++ b/content/v2.0/reference/flux/stdlib/system/_index.md
@@ -4,11 +4,13 @@ list_title: System package
description: >
The Flux system package provides functions for reading values from the system.
Import the `system` package.
+aliases:
+ - /v2.0/reference/flux/functions/system/
menu:
v2_0_ref:
name: System
- parent: Flux packages and functions
-weight: 204
+ parent: Flux standard library
+weight: 202
v2.0/tags: [system, functions, package]
---
diff --git a/content/v2.0/reference/flux/stdlib/system/time.md b/content/v2.0/reference/flux/stdlib/system/time.md
new file mode 100644
index 000000000..961a3a371
--- /dev/null
+++ b/content/v2.0/reference/flux/stdlib/system/time.md
@@ -0,0 +1,46 @@
+---
+title: system.time() function
+description: The `system.time()` function returns the current system time.
+aliases:
+ - /v2.0/reference/flux/functions/misc/systemtime
+ - /v2.0/reference/flux/functions/built-in/misc/systemtime
+ - /v2.0/reference/flux/functions/system/time/
+menu:
+ v2_0_ref:
+ name: system.time
+ parent: System
+weight: 401
+related:
+ - /v2.0/reference/flux/stdlib/built-in/misc/now/
+---
+
+The `system.time()` function returns the current system time.
+
+_**Function type:** Date/Time_
+_**Output data type:** Timestamp_
+
+```js
+import "system"
+
+system.time()
+```
+
+## Examples
+```js
+import "system"
+
+data
+ |> set(key: "processed_at", value: string(v: system.time()))
+```
+
+{{% note %}}
+#### system.time() vs now()
+`system.time()` returns the current system time of the host machine, which
+typically accounts for the local time zone.
+This time represents the time at which `system.time()` is executed, so each
+instance of `system.time()` in a Flux script returns a unique value.
+
+[`now()`](/v2.0/reference/flux/stdlib/built-in/misc/now/) returns the current UTC time.
+`now()` is cached at runtime, so all instances of `now()` in a Flux script
+return the same value.
+{{% /note %}}
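+
+For example, the following sketch (reusing the placeholder `data` stream from the
+example above) stores both timestamps side by side for comparison:
+
+```js
+import "system"
+
+data
+  |> map(fn: (r) => ({
+      r with
+      query_time: string(v: now()),
+      processed_at: string(v: system.time())
+    })
+  )
+```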
diff --git a/content/v2.0/reference/flux/functions/testing/_index.md b/content/v2.0/reference/flux/stdlib/testing/_index.md
similarity index 83%
rename from content/v2.0/reference/flux/functions/testing/_index.md
rename to content/v2.0/reference/flux/stdlib/testing/_index.md
index 10475d55b..e74845d88 100644
--- a/content/v2.0/reference/flux/functions/testing/_index.md
+++ b/content/v2.0/reference/flux/stdlib/testing/_index.md
@@ -4,11 +4,13 @@ list_title: Testing package
description: >
The Flux testing package provides functions that test piped-forward data in specific ways.
Import the `testing` package.
+aliases:
+ - /v2.0/reference/flux/functions/testing/
menu:
v2_0_ref:
name: Testing
- parent: Flux packages and functions
-weight: 205
+ parent: Flux standard library
+weight: 202
v2.0/tags: [testing, functions, package]
---
diff --git a/content/v2.0/reference/flux/functions/testing/assertempty.md b/content/v2.0/reference/flux/stdlib/testing/assertempty.md
similarity index 91%
rename from content/v2.0/reference/flux/functions/testing/assertempty.md
rename to content/v2.0/reference/flux/stdlib/testing/assertempty.md
index 33b7a30b1..f5d56d680 100644
--- a/content/v2.0/reference/flux/functions/testing/assertempty.md
+++ b/content/v2.0/reference/flux/stdlib/testing/assertempty.md
@@ -1,6 +1,8 @@
---
title: testing.assertEmpty() function
description: The testing.assertEmpty() function tests if an input stream is empty.
+aliases:
+ - /v2.0/reference/flux/functions/testing/assertempty/
menu:
v2_0_ref:
name: testing.assertEmpty
@@ -24,7 +26,7 @@ _The `testing.assertEmpty()` function can be used to perform in-line tests in a
## Examples
#### Check if there is a difference between streams
-This example uses the [`testing.diff()` function](/v2.0/reference/flux/functions/testing/diff)
+This example uses the [`testing.diff()` function](/v2.0/reference/flux/stdlib/testing/diff)
which outputs the diff for the two streams.
The `.testing.assertEmpty()` function checks to see if the diff is empty.
diff --git a/content/v2.0/reference/flux/functions/testing/assertequals.md b/content/v2.0/reference/flux/stdlib/testing/assertequals.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/testing/assertequals.md
rename to content/v2.0/reference/flux/stdlib/testing/assertequals.md
index 0c6fb733e..8fdee95ee 100644
--- a/content/v2.0/reference/flux/functions/testing/assertequals.md
+++ b/content/v2.0/reference/flux/stdlib/testing/assertequals.md
@@ -3,6 +3,7 @@ title: testing.assertEquals() function
description: The testing.assertEquals() function tests whether two streams have identical data.
aliases:
- /v2.0/reference/flux/functions/tests/assertequals
+ - /v2.0/reference/flux/functions/testing/assertequals/
menu:
v2_0_ref:
name: testing.assertEquals
diff --git a/content/v2.0/reference/flux/functions/testing/diff.md b/content/v2.0/reference/flux/stdlib/testing/diff.md
similarity index 96%
rename from content/v2.0/reference/flux/functions/testing/diff.md
rename to content/v2.0/reference/flux/stdlib/testing/diff.md
index db2e0af5f..8e3dd1e37 100644
--- a/content/v2.0/reference/flux/functions/testing/diff.md
+++ b/content/v2.0/reference/flux/stdlib/testing/diff.md
@@ -1,6 +1,8 @@
---
title: testing.diff() function
description: The testing.diff() function produces a diff between two streams.
+aliases:
+ - /v2.0/reference/flux/functions/testing/diff/
menu:
v2_0_ref:
name: testing.diff
diff --git a/content/v2.0/reference/glossary.md b/content/v2.0/reference/glossary.md
new file mode 100644
index 000000000..1e44214d0
--- /dev/null
+++ b/content/v2.0/reference/glossary.md
@@ -0,0 +1,1094 @@
+---
+title: Glossary
+description: >
+ Terms related to InfluxData products and platforms.
+weight: 7
+menu:
+ v2_0_ref:
+ name: Glossary
+v2.0/tags: [glossary]
+---
+
+[A](#a) | [B](#b) | [C](#c) | [D](#d) | [E](#e) | [F](#f) | [G](#g) | [H](#h) | [I](#i) | [J](#j) | [K](#k) | [L](#l) | [M](#m) | [N](#n) | [O](#o) | [P](#p) | [Q](#q) | [R](#r) | [S](#s) | [T](#t) | [U](#u) | [V](#v) | [W](#w) | [X](#x) | [Y](#y) | [Z](#z)
+
+## A
+
+### agent
+
+A background process started by (or on behalf of) a user that typically requires user input.
+
+Telegraf is an agent that requires user input (a configuration file) to gather metrics from declared input plugins and send metrics to declared output plugins, based on the plugins enabled in its configuration.
+
+Related entries: [input plugin](#input-plugin), [output plugin](#output-plugin), [daemon](#daemon)
+
+### aggregator plugin
+
+Receives metrics from input plugins, creates aggregate metrics, and then passes aggregate metrics to configured output plugins.
+
+Related entries: [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin)
+
+### aggregate
+
+A function that returns an aggregated value across a set of points.
+For a list of available aggregation functions, see [Flux built-in aggregate functions](/v2.0/reference/flux/stdlib/built-in/transformations/aggregates/).
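+
+For example, a minimal sketch (bucket and measurement names are hypothetical) that
+aggregates each input table down to a single mean value:
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -1h)
+  |> filter(fn: (r) => r._measurement == "cpu")
+  |> mean()
+```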
+
+Related entries: [function](#function), [selector](#selector), [transformation](#transformation)
+
+## B
+
+### bar graph
+
+A visual representation in the InfluxDB user interface used to compare variables (bars) and plot categorical data.
+A bar graph has spaces between bars, can be sorted in any order, and bars in the graph typically have the same width.
+
+Related entries: [histogram](#histogram)
+
+### batch
+
+A collection of points in line protocol format, separated by newlines (`0x0A`).
+Submitting a batch of points using a single HTTP request to the write endpoints drastically increases performance by reducing the HTTP overhead.
+InfluxData typically recommends batch sizes of 5,000-10,000 points.
+In some use cases, performance may improve with significantly smaller or larger batches.
+
+Related entries: [line protocol](/v2.0/reference/line-protocol/), [point](#point)
+
+### batch size
+
+The number of lines or individual data points in a line protocol batch.
+The Telegraf agent sends metrics to output plugins in batches rather than individually.
+Batch size controls the size of each write batch that Telegraf sends to the output plugins.
+
+Related entries: [output plugin](#output-plugin)
+
+### bin
+
+In a cumulative histogram, a bin includes all data points less than or equal to a specified upper bound.
+In a normal histogram, a bin includes all data points between the upper and lower bounds.
+
+### block
+
+In Flux, a block is a possibly empty sequence of statements within matching braces (`{ }`).
+Two types of blocks exist in Flux:
+
+- Explicit blocks in the source code, for example:
+
+ ```
+ Block = "{" StatementList "}"
+ StatementList = { Statement }
+ ```
+
+- Implicit blocks, including:
+
+ - Universe: Encompasses all Flux source text.
+ - Package: Each package includes a package block that contains Flux source text for the package.
+ - File: Each file has a file block containing Flux source text in the file.
+ - Function: Each function literal has a function block with Flux source text (even if not explicitly declared).
+
+Related entries: [implicit block](#implicit-block), [explicit block](#explicit-block)
+
+### boolean
+
+A data type with two possible values: true or false.
+By convention, you can express `true` as the integer `1` and `false` as the integer `0` (zero).
+
+### bucket
+
+A bucket is a named location where time series data is stored.
+All buckets have a retention policy, a duration of time that each data point persists.
+A bucket belongs to an organization.
+
+## C
+
+### check
+
+Checks are part of queries used in monitoring to read input data and assign a [status](#check-status) (`_level`) based on specified conditions.
+For example:
+
+```
+monitor.check(
+ crit: (r) => r._value > 90.0,
+ warn: (r) => r._value > 80.0,
+ info: (r) => r._value > 60.0,
+ ok: (r) => r._value <= 20.0,
+ messageFn: (r) => "The current level is ${r._level}",
+)
+```
+
+This check gives rows with a `_value` greater than 90.0 a crit `_level`; rows greater than 80.0 get a warn `_level`, and so on.
+
+Learn how to [create a check](/v2.0/monitor-alert/checks/create/).
+
+Related entries: [check status](#check-status), [notification rule](#notification-rule), [notification endpoint](#notification-endpoint)
+
+### check status
+
+A [check](#check) gets one of the following statuses (`_level`): `crit`, `info`, `warn`, or `ok`.
+Check statuses are written to a status measurement in the `_monitoring` bucket.
+
+Related entries: [check](#check), [notification rule](#notification-rule), [notification endpoint](#notification-endpoint)
+
+### CSV
+
+Comma-separated values (CSV) delimits text between commas to separate values.
+A CSV file stores tabular data (numbers and text) in plain text.
+Each line of the file is a data record.
+Each record consists of one or more fields, separated by commas.
+CSV file format is not fully standardized.
+
+InfluxData uses annotated CSV (comma-separated values) format to encode HTTP responses and results returned to the Flux `csv.from()` function.
+For more detail, see [Annotated CSV](/v2.0/reference/annotated-csv/).
+
+
+
+### co-monitoring dashboard
+
+The prebuilt co-monitoring dashboard displays details of your instance based on metrics from Telegraf, allowing you to monitor overall performance.
+
+### collect
+
+Collect and write time series data to InfluxDB using line protocol, Telegraf or InfluxDB scrapers, the InfluxDB v2 API, the influx command line interface (CLI), the InfluxDB user interface (UI), and client libraries.
+
+### collection interval
+
+The default global interval for collecting data from each Telegraf input plugin.
+The collection interval can be overridden by each individual input plugin's configuration.
+
+Related entries: [input plugin](#input-plugin)
+
+
+
+### collection jitter
+
+Collection jitter prevents every input plugin from collecting metrics simultaneously, which can have a measurable effect on the system.
+For each collection interval, every Telegraf input plugin will sleep for a random time between zero and the collection jitter before collecting the metrics.
+
+Related entries: [collection interval](#collection-interval), [input plugin](#input-plugin)
+
+### column
+
+InfluxDB data is stored in tables within rows and columns.
+Columns store tag sets (indexed) and field sets.
+The only required column is _time_, which stores timestamps and is included in all InfluxDB tables.
+
+### comment
+
+Use comments with Flux statements to describe your functions.
+
+### common log format (CLF)
+
+A standardized text file format used by the InfluxDB web server to create log entries when generating server log files.
+
+### continuous query (CQ)
+
+Continuous queries are the predecessor to tasks in InfluxDB 2.0.
+Continuous queries run automatically and periodically on a database.
+
+Related entries: [function](#function)
+
+## D
+
+### daemon
+
+A background process that runs without user input.
+
+### dashboard
+
+InfluxDB dashboards visualize time series data.
+Use dashboards to query and graph data.
+
+### dashboard variable
+
+Dashboard template variables define components of a cell query.
+Dashboard variables make it easier to interact with and explore your dashboard data.
+Use the InfluxDB user interface (UI) to add predefined template variables or customize your own template variables.
+
+### Data Explorer
+
+Use the Data Explorer in the InfluxDB user interface (UI) to view, add, or delete variables and functions manually or using the Script Editor.
+
+### data model
+
+A data model organizes elements of data and standardizes how they relate to one another and to properties of real-world entities.
+
+Flux uses a data model built from basic data types: tables, records, columns, and streams.
+
+
+
+### data service
+
+Stores time series data and handles writes and queries.
+
+### data source
+
+A source from which InfluxDB collects or queries data.
+Examples include InfluxDB buckets, Prometheus, Postgres, MySQL, and InfluxDB clients.
+
+Related entries: [bucket](#bucket)
+
+### data type
+
+A data type is defined by the values it can take, the programming language used, or the operations that can be performed on it.
+
+InfluxDB supports the following data types: float, integer, string, boolean, and timestamp.
+
+### database
+
+In InfluxDB 2.0, a database represents the InfluxDB instance as a whole.
+
+Related entries: [continuous query](#continuous-query-cq), [user](#user)
+
+### date-time
+
+InfluxDB stores the date-time format for each data point in a timestamp with nanosecond-precision Unix time.
+Specifying a timestamp is optional.
+If a timestamp isn't specified for a data point, InfluxDB uses the server’s local nanosecond timestamp in UTC.
+
+### downsample
+
+Aggregating high-resolution data into lower-resolution data to preserve disk space.
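+
+For example, a minimal Flux sketch (bucket names are hypothetical) that downsamples
+raw data into hourly means:
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -24h)
+  |> aggregateWindow(every: 1h, fn: mean)
+  |> to(bucket: "example-bucket-downsampled")
+```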
+
+### duration
+
+A data type that represents a duration of time (1s, 1m, 1h, 1d).
+Retention policies are set using durations.
+Data older than the duration is automatically dropped from the database.
+
+
+
+## E
+
+### event
+
+Metrics gathered at irregular time intervals.
+
+### explicit block
+
+In Flux, an explicit block is a possibly empty sequence of statements within matching braces (`{ }`) defined in the source code, for example:
+
+```
+Block = "{" StatementList "}"
+StatementList = { Statement }
+```
+
+Related entries: [implicit block](#implicit-block), [block](#block)
+
+### expression
+
+A combination of one or more constants, variables, operators, and functions.
+
+## F
+
+### field
+
+The key-value pair in InfluxDB's data structure that records metadata and the actual data value.
+Fields are required in InfluxDB's data structure and they are not indexed - queries on field values scan all points that match the specified time range and, as a result, are not performant relative to tags.
+
+*Query tip:* Compare fields to tags; tags are indexed.
+
+Related entries: [field key](#field-key), [field set](#field-set), [field value](#field-value), [tag](#tag)
+
+### field key
+
+The key of the key-value pair.
+Field keys are strings and they store metadata.
+
+Related entries: [field](#field), [field set](#field-set), [field value](#field-value), [tag key](#tag-key)
+
+### field set
+
+The collection of field keys and field values on a point.
+
+Related entries: [field](#field), [field key](#field-key), [field value](#field-value), [point](#point)
+
+### field value
+
+The value of a key-value pair.
+Field values are the actual data; they can be strings, floats, integers, or booleans.
+A field value is always associated with a timestamp.
+
+Field values are not indexed - queries on field values scan all points that match the specified time range and, as a result, are not performant.
+
+*Query tip:* Compare field values to tag values; tag values are indexed.
+
+Related entries: [field](#field), [field key](#field-key), [field set](#field-set), [tag value](#tag-value), [timestamp](#timestamp)
+
+### file block
+
+A file block is a fixed-length chunk of data read into memory when requested by an application.
+
+Related entries: [block](#block)
+
+### float
+
+A float represents real numbers and is written with a decimal point dividing the integer and fractional parts.
+For example, 1.0, 3.14.
+
+### flush interval
+
+The global interval for flushing data from each Telegraf output plugin to its destination.
+This value should not be set lower than the collection interval.
+
+Related entries: [collection interval](#collection-interval), [flush jitter](#flush-jitter), [output plugin](#output-plugin)
+
+### flush jitter
+
+Flush jitter prevents every Telegraf output plugin from sending writes simultaneously, which can overwhelm some data sinks.
+Each flush interval, every Telegraf output plugin will sleep for a random time between zero and the flush jitter before emitting metrics.
+Flush jitter smooths out write spikes when running a large number of Telegraf instances.
+
+Related entries: [flush interval](#flush-interval), [output plugin](#output-plugin)
+
+### Flux
+
+A lightweight scripting language for querying databases (like InfluxDB) and working with data.
+
+### function
+
+Flux functions aggregate, select, and transform time series data.
+For a complete list of Flux functions, see [Flux functions](/v2.0/reference/flux/stdlib/all-functions/).
+
+Related entries: [aggregate](#aggregate), [selector](#selector), [transformation](#transformation)
+
+### function block
+
+In Flux, each file has a file block containing all Flux source text in that file.
+Each function literal has its own function block even if not explicitly declared.
+
+## G
+
+### gauge
+
+A type of visualization that displays the single most recent value for a time series.
+A gauge typically displays one or more measures from a single row, and is not designed to display multiple rows of data.
+Elements include a range, major and minor tick marks (within the range), and a pointer (needle) indicating the single most recent value.
+
+### graph
+
+A diagram that visually depicts the relation between variable quantities measured along specified axes.
+
+### gzip
+
+gzip is a type of data compression that compresses chunks of data, which are restored by unzipping the compressed gzip files.
+The gzip file extension is `.gz`.
+
+## H
+
+
+
+### histogram
+
+A visual representation of statistical information that uses rectangles to show the frequency of data items in successive, equal intervals or bins.
+
+## I
+
+### identifier
+
+Identifiers are tokens that refer to task names, bucket names, field keys,
+measurement names, tag keys, and user names.
+For examples and rules, see [Flux language lexical elements](/v2.0/reference/flux/language/lexical-elements/#identifiers).
+
+Related entries: [bucket](#bucket), [field key](#field-key), [measurement](#measurement), [tag key](#tag-key), [user](#user)
+
+### implicit block
+
+In Flux, an implicit block is a possibly empty sequence of statements within matching braces (`{ }`) that includes the following types:
+
+ - Universe: Encompasses all Flux source text.
+ - Package: Each package includes a package block that contains Flux source text for the package.
+ - File: Each file has a file block containing Flux source text in the file.
+ - Function: Each function literal has a function block with Flux source text (even if not explicitly declared).
+
+Related entries: [explicit block](#explicit-block), [block](#block)
+
+### influx
+
+A command line interface (CLI) that interacts with the InfluxDB daemon (influxd).
+
+### influxd
+
+The InfluxDB daemon that runs the InfluxDB server and other required processes.
+
+### InfluxDB
+
+An open-source time series database (TSDB) developed by InfluxData.
+Written in Go and optimized for fast, high-availability storage and retrieval of time series data in fields such as operations monitoring, application metrics, Internet of Things sensor data, and real-time analytics.
+
+### InfluxDB UI
+
+The graphical web interface provided by InfluxDB for visualizing data and managing InfluxDB functionality.
+
+### InfluxQL
+
+The SQL-like query language used to query data in InfluxDB 1.x.
+
+### input plugin
+
+Telegraf input plugins actively gather metrics and deliver them to the core agent, where aggregator, processor, and output plugins can operate on the metrics.
+To activate an input plugin, enable and configure it in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](#aggregator-plugin), [collection interval](#collection-interval), [output plugin](#output-plugin), [processor plugin](#processor-plugin)
+
+### instance
+
+An entity comprising data on a server (or virtual server in cloud computing).
+
+
+### int (data type)
+
+A data type that represents an integer, a whole number that's positive, negative, or zero.
+
+## J
+
+### JWT
+
+Typically, JSON web tokens (JWT) are used to authenticate users between an identity provider and a service provider.
+A server can generate a JWT to assert any business processes.
+For example, an "admin" token sent to a client can prove the client is logged in as admin.
+Tokens are signed by one party's private key (typically, the server).
+Private keys are used by both parties to verify that a token is legitimate.
+
+JWT uses an open standard [RFC 7519](https://tools.ietf.org/html/rfc7519).
+
+### Jaeger
+
+Open source tracing used in distributed systems to monitor and troubleshoot transactions.
+
+### JSON
+
+JavaScript Object Notation (JSON) is an open-standard file format that uses human-readable text to transmit data objects consisting of attribute–value pairs and array data types.
+
+## K
+
+### keyword
+
+A keyword is reserved by a program because it has special meaning.
+Every programming language has a set of keywords (reserved names) that cannot be used as an identifier.
+
+See a list of [Flux keywords](/v2.0/reference/flux/language/lexical-elements/#keywords).
+
+## L
+
+### literal
+
+A literal is a value in an expression, such as a number, character, string, function, object, or array.
+Literal values are interpreted as defined.
+
+See examples of [Flux literals](/v2.0/reference/flux/language/expressions/#examples-of-function-literals).
+
+
+
+### logs
+
+Logs record information.
+Event logs describe system events and activity that help to describe and diagnose problems.
+Transaction logs describe changes to stored data that help recover data if a database crashes or other errors occur.
+
+The InfluxDB 2.0 user interface (UI) can be used to view log history and data.
+
+### Line protocol (LP)
+
+The text-based format for writing points to InfluxDB.
+See [line protocol](/v2.0/reference/line-protocol/).
+
+## M
+
+### measurement
+
+The part of InfluxDB's structure that describes the data stored in the associated fields.
+Measurements are strings.
+
+Related entries: [field](#field), [series](#series)
+
+### member
+
+A user in an organization.
+
+
+
+
+### metric
+
+Data tracked over time.
+
+### metric buffer
+
+The metric buffer caches individual metrics when writes are failing for a Telegraf output plugin.
+Telegraf will attempt to flush the buffer upon a successful write to the output.
+The oldest metrics are dropped first when this buffer fills.
+
+Related entries: [output plugin](#output-plugin)
+
+### missing values
+
+Denoted by a null value.
+Identifies missing information, which may be useful to include in an error message.
+
+The Flux data model includes [Missing values (null)](/v2.0/reference/flux/language/data-model/#missing-values-null).
+
+## N
+
+### node
+
+An independent `influxd` process.
+
+Related entries: [server](#server)
+
+### notification endpoint
+
+A notification endpoint specifies the Slack or PagerDuty endpoint to send a notification to and contains configuration details for connecting to that endpoint.
+Learn how to [create a notification endpoint](/v2.0/monitor-alert/notification-endpoints/create).
+
+Related entries: [check](#check), [notification rule](#notification-rule)
+
+### notification rule
+
+A notification rule specifies a status level (and tags) to alert on, the notification message to send for the specified status level (or change in status level), and the interval or schedule at which to check the status level (and tags).
+If conditions are met, the notification rule sends a message to the [notification endpoint](#notification-endpoint) and stores a receipt in a notification measurement in the `_monitoring` bucket.
+For example, a notification rule may specify a message to send to a Slack endpoint when a status level is critical (`crit`).
+
+Learn how to [create a notification rule](/v2.0/monitor-alert/notification-rules/create).
+
+Related entries: [check](#check), [notification endpoint](#notification-endpoint)
+
+### now()
+
+The local server's nanosecond timestamp.
+
+### null
+
+A data type that represents a missing or unknown value.
+Denoted by the null value.
+
+## O
+
+### operator
+
+A symbol that usually represents an action or process.
+For example: `+`, `-`, `>`.
+
+### operand
+
+The object or value on either side of an operator.
+
+### option
+
+Represents a storage location for any value of a specified type.
+Mutable, can hold different values during its lifetime.
+
+See built-in Flux [options](/v2.0/reference/flux/language/options/).
+
+### option assignment
+
+An option assignment binds an identifier to an option.
+
+Learn about the [option assignment](/v2.0/reference/flux/language/assignment-scope/#option-assignment) in Flux.
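+
+For example, a sketch of an option assignment that overrides the built-in `now` option:
+
+```js
+option now = () => 2020-01-01T00:00:00Z
+```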
+
+### organization
+
+A workspace for a group of users.
+All dashboards, tasks, buckets, members, and so on, belong to an organization.
+
+### output plugin
+
+Telegraf output plugins deliver metrics to their configured destination.
+To activate an output plugin, enable and configure the plugin in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](#aggregator-plugin), [flush interval](#flush-interval), [input plugin](#input-plugin), [processor plugin](#processor-plugin)
+
+## P
+
+### parameter
+
+A key-value pair used to pass information to functions.
+
+### pipe
+
+Method for passing information from one process to another.
+For example, an output parameter from one process is input to another process.
+Information passed through a pipe is retained until the receiving process reads the information.
+
+### pipe-forward operator
+
+An operator (`|>`) used in Flux to chain operations together.
+Specifies that the output of one function is the input to the next function.
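+
+For example, each pipe-forward operator below sends the output of the previous function
+to the next function (bucket and measurement names are hypothetical):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -30m)
+  |> filter(fn: (r) => r._measurement == "mem")
+  |> limit(n: 10)
+```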
+
+### point
+
+In InfluxDB, a point represents a single data record, similar to a row in a SQL database table.
+Each point:
+
+- has a measurement, a tag set, a field key, a field value, and a timestamp;
+- is uniquely identified by its series and timestamp.
+
+In a series, each point has a unique timestamp.
+If you write a point to a series with a timestamp that matches an existing point, the field set becomes a union of the old and new field set, where any ties go to the new field set.
+
+Related entries: [measurement](#measurement), [tag set](#tag-set), [field set](#field-set), [timestamp](#timestamp)
+
+### precision
+
+The precision configuration setting determines the timestamp precision retained for input data points.
+All incoming timestamps are truncated to the specified precision.
+Valid precisions are `ns`, `us` or `µs`, `ms`, and `s`.
+
+In Telegraf, truncated timestamps are padded with zeros to create a nanosecond timestamp.
+Telegraf output plugins emit timestamps in nanoseconds.
+For example, if the precision is set to `ms`, the nanosecond epoch timestamp `1480000000123456789` is truncated to `1480000000123` in millisecond precision and padded with zeroes to make a new, less precise nanosecond timestamp of `1480000000123000000`.
+Telegraf output plugins do not alter the timestamp further.
+The precision setting is ignored for service input plugins.
+
+Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin), [service input plugin](#service-input-plugin)
+
+### process
+
+A set of predetermined rules.
+A process can refer to instructions being executed by the computer processor or refer to the act of manipulating data.
+
+In Flux, you can process data with [InfluxDB tasks](/v2.0/process-data/get-started/).
+
+### processor plugin
+
+Telegraf processor plugins transform, decorate, and filter metrics collected by input plugins, passing the transformed metrics to the output plugins.
+
+Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin)
+
+### Prometheus format
+
+A simple text-based format for exposing metrics and ingesting them into Prometheus or InfluxDB using InfluxDB scrapers.
+
+Collect data from any accessible endpoint that provides data in the [Prometheus exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/).
+
+## Q
+
+### query
+
+A Flux script that returns time series data, including [tags](#tag) and [timestamps](#timestamp).
+
+See [Query data in InfluxDB](/v2.0/query-data/).
+
+## R
+
+### REPL
+
+A read-eval-print loop (REPL) is an interactive programming environment where you type a command and immediately see the result.
+See [Use the influx CLI's REPL](/v2.0/query-data/get-started/syntax-basics/#use-the-influx-cli-s-repl).
+
+### record
+
+A tuple of named values represented using an object type.
+
+### regular expressions
+
+Regular expressions (regex or regexp) are patterns used to match character combinations in strings.
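+
+For example, a sketch of filtering with a regular expression in Flux (the `host` tag is hypothetical):
+
+```js
+data
+  |> filter(fn: (r) => r.host =~ /^prod-/)
+```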
+
+
+
+## S
+
+### schema
+
+How data is organized in InfluxDB.
+The fundamentals of the InfluxDB schema are buckets (which include retention policies), series, measurements, tag keys, tag values, and field keys.
+
+Related entries: [bucket](#bucket), [field key](#field-key), [measurement](#measurement), [series](#series), [tag key](#tag-key), [tag value](#tag-value)
+
+### scrape
+
+InfluxDB scrapes data from specified targets at regular intervals and writes the data to an InfluxDB bucket.
+Data can be scraped from any accessible endpoint that provides data in the [Prometheus exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/).
+
+### selector
+
+A Flux function that returns a single point from the range of specified points.
+See [Flux built-in selector functions](/v2.0/reference/flux/stdlib/built-in/transformations/selectors/) for a complete list of available built-in selector functions.
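+
+For example, a minimal sketch using the `last()` selector to return only the most recent
+point from each input table:
+
+```js
+data
+  |> last()
+```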
+
+Related entries: [aggregate](#aggregate), [function](#function), [transformation](#transformation)
+
+### series
+
+A collection of data in the InfluxDB data structure that shares a measurement, tag set, and bucket.
+
+Related entries: [field set](#field-set), [measurement](#measurement), [tag set](#tag-set)
+
+### series cardinality
+
+The number of unique bucket, measurement, tag set, and field key combinations in an InfluxDB instance.
+
+For example, assume that an InfluxDB instance has a single bucket and one measurement.
+The single measurement has two tag keys: `email` and `status`.
+If there are three different `email`s, and each email address is associated with two
+different `status`es, the series cardinality for the measurement is 6
+(3 * 2 = 6):
+
+| email | status |
+| :-------------------- | :----- |
+| lorr@influxdata.com | start |
+| lorr@influxdata.com | finish |
+| marv@influxdata.com | start |
+| marv@influxdata.com | finish |
+| cliff@influxdata.com | start |
+| cliff@influxdata.com | finish |
+
+In some cases, performing this multiplication may overestimate series cardinality because of the presence of dependent tags.
+Dependent tags are scoped by another tag and do not increase series
+cardinality.
+If we add the tag `firstname` to the example above, the series cardinality
+would not be 18 (3 * 2 * 3 = 18).
+The series cardinality would remain unchanged at 6, as `firstname` is already scoped by the `email` tag:
+
+| email | status | firstname |
+| :------------------- | :----- | :-------- |
+| lorr@influxdata.com | start | lorraine |
+| lorr@influxdata.com | finish | lorraine |
+| marv@influxdata.com | start | marvin |
+| marv@influxdata.com | finish | marvin |
+| cliff@influxdata.com | start | clifford |
+| cliff@influxdata.com | finish | clifford |
+
+
+
+Related entries: [field key](#field-key),[measurement](#measurement), [tag key](#tag-key), [tag set](#tag-set)
+
+### series key
+
+A series key identifies a particular series by measurement, tag set, and field key.
+
+For example:
+
+```
+# measurement, tag set, field key
+h2o_level, location=santa_monica, h2o_feet
+```
+
+Related entries: [series](#series)
+
+### server
+
+A computer, virtual or physical, running InfluxDB.
+
+
+Related entries: [node](#node)
+
+### service input plugin
+
+Telegraf input plugins that run in a passive collection mode while the Telegraf agent is running.
+Service input plugins listen on a socket for known protocol inputs, or apply their own logic to ingested metrics before delivering metrics to the Telegraf agent.
+
+Related entries: [aggregator plugin](#aggregator-plugin), [input plugin](#input-plugin), [output plugin](#output-plugin), [processor plugin](#processor-plugin)
+
+
+
+### Single Stat
+
+A visualization that displays the numeric value of the most recent point in a table (or series) returned by a query.
+
+### Snappy compression
+
+InfluxDB uses snappy compression to compress batches of points.
+To improve space and disk IO efficiency, each batch is compressed before being written to disk.
+
+
+
+### step-plot
+
+In InfluxDB 1.x, a [step-plot graph](https://docs.influxdata.com/chronograf/v1.7/guides/visualization-types/#step-plot-graph) displays time series data in a staircase graph.
+In InfluxDB 2.0, generate a similar graph using the step interpolation option for [line graphs](https://v2.docs.influxdata.com/v2.0/visualize-data/visualization-types/graph/#options).
+
+### stream
+
+Flux processes streams of data.
+A stream includes a series of tables over a sequence of time intervals.
+
+### string
+
+A data type used to represent text.
+
+## T
+
+### TCP
+
+InfluxDB uses Transmission Control Protocol (TCP) port 9999 for client-server communication over the InfluxDB HTTP API.
+
+
+
+### table
+
+Flux processes a series of tables for a specified time series.
+These tables in sequence result in a stream of data.
+
+### tag
+
+The key-value pair in InfluxDB's data structure that records metadata.
+Tags are an optional part of InfluxDB's data structure but they are useful for storing commonly-queried metadata; tags are indexed so queries on tags are performant.
+*Query tip:* Compare tags to fields; fields are not indexed.
+
+Related entries: [field](#field), [tag key](#tag-key), [tag set](#tag-set), [tag value](#tag-value)
+
+### tag key
+
+The key of a tag key-value pair.
+Tag keys are strings and store metadata.
+Tag keys are indexed so queries on tag keys are processed quickly.
+
+*Query tip:* Compare tag keys to field keys.
+Field keys are not indexed.
+
+Related entries: [field key](#field-key), [tag](#tag), [tag set](#tag-set), [tag value](#tag-value)
+
+### tag set
+
+The collection of tag keys and tag values on a point.
+
+Related entries: [point](#point), [series](#series), [tag](#tag), [tag key](#tag-key), [tag value](#tag-value)
+
+### tag value
+
+The value of a tag key-value pair.
+Tag values are strings and they store metadata.
+Tag values are indexed so queries on tag values are processed quickly.
+
+Related entries: [tag](#tag), [tag key](#tag-key), [tag set](#tag-set)
+
+### task
+
+A scheduled Flux query that runs periodically and may store results in a specified measurement.
+Examples include downsampling and batch jobs.
+For more information, see [Process Data with InfluxDB tasks](/v2.0/process-data/).
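+
+A minimal task sketch (the task name, schedule, and bucket names are hypothetical):
+
+```js
+option task = {name: "downsample-example", every: 1h}
+
+from(bucket: "example-bucket")
+  |> range(start: -task.every)
+  |> aggregateWindow(every: 10m, fn: mean)
+  |> to(bucket: "example-bucket-downsampled")
+```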
+
+Related entries: [function](#function)
+
+### Telegraf
+
+A plugin-driven agent that collects, processes, aggregates, and writes metrics.
+
+Related entries: [Automatically configure Telegraf](https://v2.docs.influxdata.com/v2.0/write-data/use-telegraf/auto-config/), [Manually configure Telegraf](https://v2.docs.influxdata.com/v2.0/write-data/use-telegraf/manual-config/), [Telegraf plugins](https://v2.docs.influxdata.com/v2.0/reference/telegraf-plugins/), [Use Telegraf to collect data](https://v2.docs.influxdata.com/v2.0/write-data/use-telegraf/), [View a Telegraf configuration](https://v2.docs.influxdata.com/v2.0/write-data/use-telegraf/auto-config/view-telegraf-config/)
+
+### time (data type)
+
+A data type that represents a single point in time with nanosecond precision.
+
+### time series data
+
+Sequence of data points typically consisting of successive measurements made from the same source over a time interval.
+Time series data shows how data evolves over time.
+On a time series data graph, one of the axes is always time.
+Time series data may be regular or irregular.
+Regular time series data changes in constant intervals.
+Irregular time series data changes at non-constant intervals.
+
+### timestamp
+
+The date and time associated with a point.
+Time in InfluxDB is in UTC.
+
+To specify time when writing data, see [Elements of line protocol](/v2.0/reference/line-protocol/#elements-of-line-protocol).
+To specify time when querying data, see [Query InfluxDB with Flux](/v2.0/query-data/get-started/query-influxdb/#2-specify-a-time-range).
+
+Related entries: [point](#point)
+
+### token
+
+Tokens verify user and organization permissions in InfluxDB.
+
+Related entries: [Create a token](https://v2.docs.influxdata.com/v2.0/security/tokens/create-token/).
+
+### tracing
+
+By default, tracing is disabled in InfluxDB.
+To enable tracing or set other InfluxDB configuration options, see [InfluxDB configuration options](https://v2.docs.influxdata.com/v2.0/reference/config-options/).
+
+### transformation
+
+An InfluxQL function that returns a value or a set of values calculated from specified points, but does not return an aggregated value across those points.
+See [InfluxQL functions](http://docs.influxdata.com/influxdb/latest/query_language/functions/#transformations) for a complete list of the available transformations.
+
+Related entries: [aggregate](#aggregate), [function](#function), [selector](#selector)
+
+### TSI (Time Series Index)
+
+TSI uses the operating system's page cache to pull frequently accessed data into memory and keep infrequently accessed data on disk.
+
+### TSL
+
+The Time Series Logs (TSL) extension (.tsl) identifies Time Series Index (TSI) log files, generated by the tsi1 engine.
+
+### TSM (Time Structured Merge tree)
+
+A data storage format that allows greater compaction and higher write and read throughput than B+ or LSM tree implementations.
+For more information, see [Storage engine](http://docs.influxdata.com/influxdb/latest/concepts/storage_engine/).
+
+Related entries: [TSI](#tsi-time-series-index)
+
+## U
+
+### UDP
+
+User Datagram Protocol (UDP) is a connectionless communication protocol.
+When a request is made, UDP packets are sent to the recipient without verifying that they arrive.
+The sender simply continues sending the next packets.
+Skipping delivery verification lets computers communicate more quickly.
+This protocol is used when speed is desirable and error correction is not necessary.
+
+### universe block
+
+An implicit block that encompasses all Flux source text.
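+
+For example, in the following sketch the top-level assignment and the query expression belong to the universe block, while the body of the `filter()` predicate is evaluated in its own nested block:
+
+```js
+threshold = 10.0  // defined in the universe block
+
+from(bucket: "my_bucket")  // statement in the universe block; bucket name is a placeholder
+  |> range(start: -5m)
+  |> filter(fn: (r) => r._value > threshold)
+```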
+
+### user
+
+InfluxDB users are granted permission to access InfluxDB.
+Users are added as members of an organization and are given a unique authentication token.
+
+## V
+
+### values per second
+
+The preferred measurement of the rate at which data are persisted to InfluxDB.
+Write speeds are generally quoted in values per second.
+
+To calculate the values per second rate, multiply the number of points written per second by the number of values stored per point.
+For example, if the points have four fields each, and a batch of 5000 points is written 10 times per second, the values per second rate is `4 field values per point * 5000 points per batch * 10 batches per second = 200,000 values per second`.
+
+Related entries: [batch](#batch), [field](#field), [point](#point)
+
+### variable
+
+A storage location (identified by a memory address) paired with an associated symbolic name (an identifier).
+A variable contains some known or unknown quantity of information referred to as a value.
+
+### variable assignment
+
+A statement that sets or updates the value stored in a variable.
+
+In Flux, the variable assignment creates a variable bound to an identifier and gives it a type and value.
+A variable keeps the same type and value for the remainder of its lifetime.
+An identifier assigned to a variable in a block cannot be reassigned in the same block.
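+
+For example, a minimal sketch of variable assignment in Flux (the bucket name is a placeholder):
+
+```js
+timeRange = -30m   // bind a duration value to the identifier timeRange
+threshold = 5.0    // bind a float value to the identifier threshold
+
+from(bucket: "my_bucket")
+  |> range(start: timeRange)
+  |> filter(fn: (r) => r._value > threshold)
+```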
+
+## W
+
+
+
+### windowing
+
+Grouping data based on specified time intervals.
+For information about how to window in Flux, see [Window and aggregate data with Flux](https://v2.docs.influxdata.com/v2.0/query-data/guides/window-aggregate/).
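+
+For example, a sketch that windows data into five-minute intervals (the bucket name is a placeholder):
+
+```js
+from(bucket: "my_bucket")
+  |> range(start: -1h)
+  |> window(every: 5m)
+```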
diff --git a/content/v2.0/reference/key-concepts/_index.md b/content/v2.0/reference/key-concepts/_index.md
new file mode 100644
index 000000000..a637e6bbe
--- /dev/null
+++ b/content/v2.0/reference/key-concepts/_index.md
@@ -0,0 +1,14 @@
+---
+title: InfluxDB key concepts
+description: >
+ Concepts related to InfluxDB.
+weight: 2
+menu:
+ v2_0_ref:
+ name: Key concepts
+v2.0/tags: [key concepts]
+---
+
+Before working with InfluxDB 2.0, it's helpful to learn a few key concepts. Browse the topics below to learn more.
+
+{{< children >}}
diff --git a/content/v2.0/reference/key-concepts/data-elements.md b/content/v2.0/reference/key-concepts/data-elements.md
new file mode 100644
index 000000000..25d85d7e6
--- /dev/null
+++ b/content/v2.0/reference/key-concepts/data-elements.md
@@ -0,0 +1,175 @@
+---
+title: InfluxDB data elements
+description: >
+ InfluxDB structures data using elements such as timestamps, field keys, field values, tags, etc.
+weight: 102
+menu:
+ v2_0_ref:
+ parent: Key concepts
+ name: Data elements
+v2.0/tags: [key concepts, schema]
+---
+
+InfluxDB 2.0 includes the following data elements:
+
+- [timestamp](#timestamp)
+- [field key](#field-key)
+- [field value](#field-value)
+- [field set](#field-set)
+- [tag key](#tag-key)
+- [tag value](#tag-value)
+- [tag set](#tag-set)
+- [measurement](#measurement)
+- [series](#series)
+- [point](#point)
+- [bucket](#bucket)
+- [organization](#organization)
+
+The sample data below is used to illustrate data elements concepts.
+_Hover over highlighted terms to get acquainted with InfluxDB terminology and layout._
+
+**bucket:** `my_bucket`
+
+| _time | _measurement | location | scientist | _field | _value |
+|:------------------- |:------------ |:------- |:------ |:-- |:------ |
+| 2019-08-18T00:00:00Z | census | klamath | anderson | bees | 23 |
+| 2019-08-18T00:00:00Z | census | portland | mullen | ants | 30 |
+| 2019-08-18T00:06:00Z | census | klamath | anderson | bees | 28 |
+| 2019-08-18T00:06:00Z | census | portland | mullen | ants | 32 |
+
+## Timestamp
+
+All data stored in InfluxDB has a `_time` column that stores timestamps. On disk, timestamps are stored in epoch nanosecond format. InfluxDB formats timestamps to show the date and time, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) UTC, associated with the data. Timestamp precision is important when you write data.
+
+## Measurement
+
+The `_measurement` column shows the name of the measurement `census`. Measurement names are strings. A measurement acts as a container for tags, fields, and timestamps. Use a measurement name that describes your data. The name `census` tells us that the field values record the number of `bees` and `ants`.
+
+## Fields
+
+A field includes a field key stored in the `_field` column and a field value stored in the `_value` column.
+
+### Field key
+
+A field key is a string that represents the name of the field. In the sample data above, `bees` and `ants` are field keys.
+
+### Field values
+
+A field value represents the value of an associated field. Field values can be strings, floats, integers, or booleans. The field values in the sample data show the number of `bees` at specified times (`23` and `28`) and the number of `ants` at specified times (`30` and `32`).
+
+### Field sets
+
+A field set is a collection of field key-value pairs associated with a timestamp. The sample data includes the following field sets:
+
+```bash
+
+census bees=23i,ants=30i 1566086400000000000
+census bees=28i,ants=32i 1566086760000000000
+ -----------------
+ Field set
+
+```
+
+{{% note %}}
+**Fields aren't indexed:** Fields are required in InfluxDB data and are not indexed. Queries that filter field values must scan all field values to match query conditions. As a result, queries on tags are more performant than queries on fields. **Store commonly queried metadata in tags.**
+{{% /note %}}
+
+## Tags
+
+The columns in the sample data, `location` and `scientist`, are tags.
+Tags are made up of tag keys and tag values, which are stored as strings and record metadata.
+
+### Tag keys
+
+The tag keys in the sample data are `location` and `scientist`.
+
+### Tag values
+
+The tag key `location` has two tag values: `klamath` and `portland`.
+The tag key `scientist` also has two tag values: `anderson` and `mullen`.
+
+### Tag sets
+
+The collection of tag key-value pairs makes up a tag set. The sample data includes the following four tag sets:
+
+```bash
+location = klamath, scientist = anderson
+location = portland, scientist = anderson
+location = klamath, scientist = mullen
+location = portland, scientist = mullen
+```
+
+{{% note %}}
+**Tags are indexed:** Tags are optional. You don't need tags in your data structure, but it's typically a good idea to include tags.
+Because tags are indexed, queries on tags are faster than queries on fields. This makes tags ideal for storing commonly-queried metadata.
+{{% /note %}}
+
+#### Why your schema matters
+
+If most of your queries focus on values in the fields, for example, a query to find when 23 bees were counted:
+
+```js
+from(bucket: "bucket-name")
+ |> range(start: 2019-08-17T00:00:00Z, stop: 2019-08-19T00:00:00Z)
+ |> filter(fn: (r) => r._field == "bees" and r._value == 23)
+```
+
+InfluxDB scans every field value in the dataset for `bees` before the query returns a response. If our sample `census` data grew to millions of rows, to optimize your query, you could rearrange your [schema](/v2.0/reference/glossary/#schema) so the fields (`bees` and `ants`) become tags and the tags (`location` and `scientist`) become fields:
+
+| _time | _measurement | bees | _field | _value |
+|:------------------- |:------------ |:------- |:-- |:------ |
+| 2019-08-18T00:00:00Z | census | 23 | location | klamath |
+| 2019-08-18T00:00:00Z | census | 23 | scientist | anderson |
+| 2019-08-18T00:06:00Z | census | 28 | location | klamath |
+| 2019-08-18T00:06:00Z | census | 28 | scientist | anderson |
+
+| _time | _measurement | ants | _field | _value |
+|:------------------- |:------------ |:------- |:-- |:------ |
+| 2019-08-18T00:00:00Z | census | 30 | location | portland |
+| 2019-08-18T00:00:00Z | census | 30 | scientist | mullen |
+| 2019-08-18T00:06:00Z | census | 32 | location | portland |
+| 2019-08-18T00:06:00Z | census | 32 | scientist | mullen |
+
+Now that `bees` and `ants` are tags, InfluxDB doesn't have to scan all `_field` and `_value` columns. This makes your queries faster.
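+
+For example, the earlier query could be rewritten as the following sketch, filtering on the `bees` tag instead of scanning field values (tag values are stored as strings, so the count is quoted):
+
+```js
+from(bucket: "bucket-name")
+  |> range(start: 2019-08-17T00:00:00Z, stop: 2019-08-19T00:00:00Z)
+  |> filter(fn: (r) => r.bees == "23")
+```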
+
+## Series
+
+Now that you're familiar with measurements, field sets, and tag sets, it's time to discuss series keys and series. A **series key** is a collection of points that share a measurement, tag set, and field key. For example, the [sample data](#sample-data) includes two unique series keys:
+
+| _measurement | tag set | _field |
+|:------------- |:------------------------------- |:------ |
+| census | location=klamath,scientist=anderson |bees|
+| census | location=portland,scientist=mullen | ants |
+
+A **series** includes timestamps and field values for a given series key. From the sample data, here's a **series key** and the corresponding **series**:
+
+```bash
+# series key
+census,location=klamath,scientist=anderson bees
+
+# series
+2019-08-18T00:00:00Z 23
+2019-08-18T00:06:00Z 28
+```
+
+Understanding the concept of a series is essential when designing your [schema](/v2.0/reference/glossary/#schema) and working with your data in InfluxDB.
+
+## Point
+
+A **point** includes the series key, a field value, and a timestamp. For example, a single point from the [sample data](#sample-data) looks like this:
+
+`2019-08-18T00:00:00Z census ants 30 portland mullen`
+
+## Bucket
+
+All InfluxDB data is stored in a bucket. A **bucket** combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. For more information about buckets, see [Manage buckets](https://v2.docs.influxdata.com/v2.0/organizations/buckets/).
+
+## Organization
+
+An InfluxDB **organization** is a workspace for a group of [users](/v2.0/users/). All [dashboards](/v2.0/visualize-data/dashboards/), [tasks](/v2.0/process-data/), buckets, and users belong to an organization. For more information about organizations, see [Manage organizations](https://v2.docs.influxdata.com/v2.0/organizations/).
+
+If you're just starting out, we recommend taking a look at the following guides:
+
+- [Get started with InfluxDB](/v2.0/get-started/)
+- [Write data to InfluxDB](/v2.0/write-data/)
+- [Query data in InfluxDB](/v2.0/query-data/)
diff --git a/content/v2.0/reference/key-concepts/design-principles.md b/content/v2.0/reference/key-concepts/design-principles.md
new file mode 100644
index 000000000..884a1ae29
--- /dev/null
+++ b/content/v2.0/reference/key-concepts/design-principles.md
@@ -0,0 +1,44 @@
+---
+title: InfluxDB design principles
+description: >
+ Principles and tradeoffs related to InfluxDB design.
+weight: 104
+menu:
+ v2_0_ref:
+ parent: Key concepts
+ name: Design principles
+v2.0/tags: [key concepts, design principles]
+---
+
+InfluxDB implements optimal design principles for time series data. Some of these design principles may have associated tradeoffs in performance.
+
+- [Time-ordered data](#time-ordered-data)
+- [Strict update and delete permissions](#strict-update-and-delete-permissions)
+- [Handle read and write queries first](#handle-read-and-write-queries-first)
+- [Schemaless design](#schemaless-design)
+- [Datasets over individual points](#datasets-over-individual-points)
+- [Duplicate data](#duplicate-data)
+
+## Time-ordered data
+
+To improve performance, data is written in time-ascending order.
+
+## Strict update and delete permissions
+
+To increase query and write performance, InfluxDB tightly restricts **update** and **delete** permissions. Time series data is predominantly new data that is never updated. Deletes generally only affect data that isn't being written to, and contentious updates never occur.
+
+## Handle read and write queries first
+
+InfluxDB prioritizes read and write requests over strong consistency. InfluxDB returns results when a query is executed. Any transactions that affect the queried data are processed subsequently to ensure data is eventually consistent. Therefore, if the ingest rate is high (multiple writes per ms), query results may not include the most recent data.
+
+## Schemaless design
+
+InfluxDB uses a schemaless design to better manage discontinuous data. Time series data is often ephemeral, meaning the data appears for a few hours and then goes away. For example, a new host gets started, reports for a while, and then gets shut down.
+
+## Datasets over individual points
+
+Because the data set is more important than an individual point, InfluxDB implements powerful tools to aggregate data and handle large data sets. Points are differentiated by timestamp and series, so points don’t have IDs in the traditional sense.
+
+## Duplicate data
+
+To simplify conflict resolution and increase write performance, InfluxDB assumes data sent multiple times is duplicate data. Identical points aren't stored twice. If a new field value is submitted for a point, InfluxDB updates the point with the most recent field value. In rare circumstances, data may be overwritten. Learn more about [duplicate points](/v2.0/write-data/best-practices/duplicate-points/).
diff --git a/content/v2.0/reference/key-concepts/table-structure.md b/content/v2.0/reference/key-concepts/table-structure.md
new file mode 100644
index 000000000..b81eb7b11
--- /dev/null
+++ b/content/v2.0/reference/key-concepts/table-structure.md
@@ -0,0 +1,24 @@
+---
+title: InfluxDB table structure
+description: >
+ InfluxDB uses a columnar system to structure tables.
+weight: 103
+menu:
+ v2_0_ref:
+ parent: Key concepts
+ name: Table structure
+v2.0/tags: [key concepts]
+---
+
+InfluxDB 2.0 uses the following columnar table structure to store data:
+
+- **Annotation rows:** include the `#group`, `#datatype`, and `#default` rows.
+- **Header row:** describes the data labels for each column in a row.
+- **Data columns:** include the annotation, result, and table columns.
+- **Data rows:** all rows that contain time series data. For details about the type of data stored in InfluxDB, see [InfluxDB data elements](/v2.0/reference/key-concepts/data-elements/).
+- **Group keys:** determine the contents of output tables in Flux by grouping records that share common values in specified columns (see the sketch below). Learn more about [grouping your data with Flux](/v2.0/query-data/guides/group-data/).
+
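+For example, a sketch that regroups output tables by a single column (the bucket name and the `location` column are placeholders):
+
+```js
+from(bucket: "my_bucket")
+  |> range(start: -1h)
+  |> group(columns: ["location"])
+```
+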
+For specifications on the InfluxDB 2.0 table structure, see [Tables](/v2.0/reference/annotated-csv/#tables).
+
+**_Tip:_** To visualize your table structure in the InfluxDB user interface, click the **Data Explorer** icon
+in the sidebar, create a query, click **Submit**, and then select **View Raw Data**.
diff --git a/content/v2.0/reference/line-protocol.md b/content/v2.0/reference/line-protocol.md
index 9b1d25ae4..5d43cfa63 100644
--- a/content/v2.0/reference/line-protocol.md
+++ b/content/v2.0/reference/line-protocol.md
@@ -93,7 +93,12 @@ The Unix nanosecond timestamp for the data point.
InfluxDB accepts one timestamp per point.
If no timestamp is provided, InfluxDB uses the system time (UTC) of its host machine.
-_**Data type:** [Unix timestamp](#unix-timestamp)_
+_**Data type:** [Unix timestamp](#unix-timestamp)_
+
+{{% note %}}
+To ensure a data point includes the time a metric is observed (not received by InfluxDB),
+include the timestamp.
+{{% /note %}}
{{% note %}}
_Use the default nanosecond precision timestamp or specify an alternative precision
@@ -235,7 +240,7 @@ Line protocol supports both literal backslashes and backslashes as an escape cha
With two contiguous backslashes, the first is interpreted as an escape character.
For example:
-| Backslashes | Intepreted as |
+| Backslashes | Interpreted as |
|:-----------:|:-------------:|
| `\` | `\` |
| `\\` | `\` |
diff --git a/content/v2.0/reference/release-notes/_index.md b/content/v2.0/reference/release-notes/_index.md
new file mode 100644
index 000000000..9d8d8392b
--- /dev/null
+++ b/content/v2.0/reference/release-notes/_index.md
@@ -0,0 +1,12 @@
+---
+title: Release notes
+description: Find important information about what's included in new versions of InfluxData products.
+menu:
+ v2_0_ref:
+ name: Release notes
+ weight: 1
+---
+
+Find important information about what's included in new versions of our products:
+
+{{< children >}}
diff --git a/content/v2.0/reference/flux/release-notes.md b/content/v2.0/reference/release-notes/flux.md
similarity index 50%
rename from content/v2.0/reference/flux/release-notes.md
rename to content/v2.0/reference/release-notes/flux.md
index 1c4f69376..65763977c 100644
--- a/content/v2.0/reference/flux/release-notes.md
+++ b/content/v2.0/reference/release-notes/flux.md
@@ -1,19 +1,491 @@
---
title: Flux release notes
-description: Important changes and notes introduced in each version of Flux.
+description: Important changes and what's new in each version of Flux.
+weight: 102
menu:
v2_0_ref:
- parent: Flux query language
- name: Flux release notes
- weight: 101
+ parent: Release notes
+ name: Flux
+aliases:
+ - /v2.0/reference/flux/release-notes
---
{{% note %}}
-_The latest release of InfluxDB v2.0 alpha includes **Flux v0.31.0**.
+_The latest release of InfluxDB v2.0 alpha includes **Flux v0.49.0**.
Though newer versions of Flux may be available, they will not be included with
InfluxDB until the next InfluxDB v2.0 release._
{{% /note %}}
+## v0.50.2 [2019-10-24]
+
+### Bug fixes
+- Make `keep()` and `drop()` throw an error if merging tables with different schemas.
+
+---
+
+## v0.50.1 [2019-10-24]
+
+### Bug fixes
+- Add annotated errors to the execute package where it affects normal usage.
+- Reorder variables in the allocator for atomic operations.
+
+---
+
+## v0.50.0 [2019-10-11]
+
+### Features
+- Add `experimental/prometheus` package.
+- Add a memory manager to the memory allocator.
+- Add an internal function for generating data.
+- Switch to using discarding mode for transformations.
+- Group key join on `_time`.
+
+### Bug fixes
+- Require `data` parameter in `monitor.check()`.
+- Return the EOF error when reading metadata.
+- Re-add missing import.
+- Fix broken links in SPEC.
+- Return error from cache.
+- Update the `universe` package to use flux errors throughout.
+- Parse escape characters in string interpolation expressions.
+- Improve CSV error message for serialized Flux error.
+- Have the interpreter return annotated Flux errors.
+
+---
+
+## v0.49.0 [2019-09-24]
+
+### Features
+- Optimize `filter()` to pass through tables when possible.
+- Additional arrow builder utilities.
+- Add a `benchmark()` function to the testing package.
+- Add an arrow backed version of the table buffer.
+
+### Bug fixes
+- Fix `sql.from()` connection leak.
+- Fix some of the memory leaks within the standard library.
+- Fix `mqtt.to()` topic parameter.
+
+---
+
+## v0.48.0 [2019-09-20]
+
+### Breaking changes
+- Convert the Flux memory allocator into an arrow allocator.
+
+### Features
+- New dependency injection framework.
+- Add planner options to Flux language.
+- Make Flux `internal/promql/quantile` behavior match PromQL `quantile` aggregate.
+
+### Bug fixes
+- Passing context to WalkIR.
+- Make `join()` reject input tables lacking `on` columns.
+
+---
+
+## v0.47.1 [2019-09-18]
+
+### Bug fixes
+- Pass dependencies to WalkIR.
+
+---
+
+## v0.47.0 [2019-09-13]
+
+### Bug fixes
+- Introduce ParenExpression.
+- Make `fmt` run `cargo fmt` on Rust directories.
+- Update `Hex.Dump` to `hex.EncodeToString`.
+- Integrate the Promql transpiler into Flux.
+
+---
+
+## v0.46.2 [2019-09-12]
+
+### Bug fixes
+- Make `to` use URL validator.
+- Add filesystem to default test dependencies.
+
+---
+
+## v0.46.1 [2019-09-11]
+
+### Bug fixes
+- Add a filesystem service.
+- Do a pointer comparison for table objects instead of a deep compare.
+
+---
+
+## v0.46.0 [2019-09-10]
+
+### Features
+- Replace EnvironmentSecretService with EmptySecret….
+- Source location for rust parser.
+
+### Bug fixes
+- Push error for bad string expression.
+- Remove `token` parameter from `pagerduty.endpoint`.
+
+---
+
+## v0.45.2 [2019-09-10]
+
+### Bug fixes
+- Push the tag before running goreleaser.
+- Additional opentracing spans for debugging query flow.
+
+---
+
+## v0.45.1 [2019-09-09]
+
+### Bug fixes
+- Ensure `http.post` respects the context.
+
+---
+
+## v0.45.0 [2019-09-06]
+
+### Features
+- Added Google Bigtable `from()`.
+
+### Bug fixes
+- Add `pagerduty.severityFromLevel()` helper function.
+- Sleep function now gets canceled when the context is canceled.
+- Categorize the undefined identifier as an invalid status code.
+- Panic from `CheckKind` in `memberEvaluator`.
+
+---
+
+## v0.44.0 [2019-09-05]
+
+### Features
+- Add `http.basicAuth` function.
+- Add measurement filters to `monitor.from` and `monitor.logs`.
+
+### Bug fixes
+- Changed the default HTTP client to be more robust.
+
+---
+
+## v0.43.0 [2019-09-04]
+
+### Features
+- PagerDuty endpoint for alerts and notifications.
+
+---
+
+## v0.42.0 [2019-08-30]
+
+### Features
+- Add `stateChanges` function.
+
+### Bug fixes
+- Race condition in looking up types in `map`.
+- Support bool equality expressions.
+- Calculating a type variable's free type variables.
+- Do not generate fresh type variables for member expressions.
+- Array instantiation.
+
+---
+
+## v0.41.0 [2019-08-26]
+
+### Features
+- Add ability to validate URLs before making `http.post` requests.
+- Evaluate string interpolation.
+- Implement the `secrets.get` function.
+- Added secret service interface.
+- Add secrets package that will construct a secret object.
+- Added a SecretService interface and a new dependencies package and a basic test of functionality.
+- Add Slack endpoint.
+
+### Bug fixes
+- Make `reset()` check for non-nil data before calling `Release()`.
+- Add test case for `notify` function.
+- Add missing math import to test case.
+- Make packages aware of options.
+- Resolved `holtWinters` panic.
+- Use non-pointer receiver for `interpreter.function`.
+
+---
+
+## v0.40.2 [2019-08-22]
+
+### Bug fixes
+- Resolved `holtWinters()` panic.
+
+---
+
+## v0.40.1 [2019-08-21]
+
+### Bug fixes
+- Use non-pointer receiver for `interpreter.function`.
+
+---
+
+## v0.40.0 [2019-08-20]
+
+### Breaking changes
+- Update compiler package to use true scope.
+- Add `http` and `json` to prelude.
+
+### Features
+- Add `alerts.check()` function.
+- Add `alerts.notify` function.
+- Add `kaufmansER()` and `kaufmansAMA()` functions.
+- Add `experimental.to()` function.
+- Add `experimental.set()` function to update entire object.
+- Add `experimental.objectKeys()` function.
+- Add `tripleExponentialDerivative()` function.
+- Add `json.encode()` function.
+- Add `mqtt.to()` function.
+- Add Bytes type.
+- Update compiler package to use true scope.
+- Add http endpoint.
+- Add post method implementation.
+- String interpolation.
+
+### Bug fixes
+- Avoid wrapping table errors in the CSV encoder.
+- Remove irrelevant TODOs.
+- `mode()` now properly considers nulls when calculating the mode.
+- Add `http` and `json` to prelude.
+- Rename all Flux test files to use `_test.flux`.
+
+---
+
+## v0.39.0 [2019-08-13]
+
+{{% warn %}}
+In Flux 0.39.0, `holtWinters()` can cause the query engine to panic.
+**Flux 0.40.2 resolves this panic.**
+{{% /warn %}}
+
+### Breaking changes
+- Implement the scanning components for string expressions.
+
+### Features
+- Add `tail()` function.
+- Add framework for `http.post()` function.
+- Implement `deadman()` function.
+- Time arithmetic functions.
+- Alerts package.
+- Add an experimental `group()` function with mode `extend`.
+- Implement the scanning components for string expressions.
+- Add `chandeMomentumOscillator()` function.
+- Add `hourSelection()` function.
+- Add `date.year()` function.
+
+### Bug fixes
+- Update object to use Invalid type instead of nil monotypes.
+- Make it so the alerts package can be defined in pure Flux.
+- Close connection after `sql.to()`.
+
+---
+
+## v0.38.0 [2019-08-06]
+
+### Features
+- Update selectors to operate on time columns.
+- Add `relativeStrengthIndex()` transformation.
+- Add double and triple exponential average transformations (`doubleEMA()` and `tripleEMA()`).
+- Add `holtWinters()` transformation.
+- Add `keepFirst` parameter to `difference()`.
+- DatePart equivalent functions.
+- Add runtime package.
+- Add and subtract duration literal arithmetic.
+- Allow `keep()` to run regardless of nonexistent columns.
+ If all columns given are nonexistent, `keep()` returns an empty table.
+- Scanner returns positioning.
+
+### Bug fixes
+- Function resolver now keeps track of local assignments that may be evaluated at runtime.
+- Fixed InfluxDB test errors.
+- Add range to tests to pass in InfluxDB.
+- Allow converting a duration to a duration.
+- Catch integer overflow and underflow for literals.
+
+---
+
+## v0.37.2 [2019-07-24]
+
+- _General cleanup of internal code._
+
+---
+
+## v0.37.1 [2019-07-23]
+
+### Bug fixes
+- Fixed InfluxDB test errors.
+- Add range to tests to pass in InfluxDB.
+
+---
+
+## v0.37.0 [2019-07-22]
+
+### Features
+- Add PromQL to Flux transpiler and Flux helper functions.
+- Add mutable arrow array builders.
+- Created date package.
+- Return query and result errors in the multi result encoder.
+- Add `exponentialMovingAverage()`.
+- Add full draft of Rust parser.
+- Implement more production rules.
+- AST marshalling.
+- Parse statements.
+- Parse integer and float literals.
+- Add initial Rust implementation of parser.
+
+---
+
+## v0.36.2 [2019-07-12]
+
+### Bug fixes
+- Add helper methods for comparing entire result sets.
+- Map will not panic when a record is `null`.
+
+---
+
+## v0.36.1 [2019-07-10]
+
+### Bug fixes
+- Add `range` call to some end-to-end tests.
+- Fix implementation of `strings.replaceAll`.
+
+---
+
+## v0.36.0 [2019-07-09]
+
+### Features
+- Updated `movingAverage()` and added `timedMovingAverage`.
+- `elapsed()` function.
+- `mode()` function.
+- `sleep()` function.
+- Modify error usage in places to use the new enriched errors.
+- Enriched error interface.
+- End-to-end tests that show how to mimic pandas functionality.
+- End-to-end tests for string functions.
+
+### Bug fixes
+- Fix `difference()` so that it returns an error instead of panicking when given a `_time` column.
+- Added end-to-end tests for type conversion functions.
+- Make `map()` error if return type is not an object.
+- Fixed miscounted allocations in the `ColListTableBuilder`.
+- Support formatting `with`.
+
+### Breaking changes
+- Updated `movingAverage()` to `timedMovingAverage` and added new
+ `movingAverage()` implementation.
+
+---
+
+## v0.35.1 [2019-07-03]
+
+### Bug fixes
+- Re-add `mergeKey` parameter to `map()` in deprecated state.
+
+---
+
+## v0.35.0 [2019-07-02]
+
+### Breaking changes
+- Remove `mergeKey` parameter from the `map()` function.
+
+### Features
+- Add `sql.to()` function.
+- Add `movingAverage()` function.
+- Add `strlen()` and `substring()` functions to the `strings` package.
+
+### Bug fixes
+- Remove `mergeKey` parameter from the `map()` function.
+- Parse float types with PostgreSQL.
+
+---
+
+## v0.34.2 [2019-06-27]
+
+### Bug fixes
+- Parse float types with PostgreSQL.
+
+---
+
+## v0.34.1 [2019-06-26]
+
+### Features
+- Add custom PostgreSQL type support.
+- Added MySQL type support.
+- Nulls work in table and row functions.
+
+### Bug fixes
+- Fixed boolean literal type conversion problem and added tests.
+- Diff should track memory allocations when it copies the table.
+- Copy table will report if it is empty correctly.
+
+---
+
+## v0.33.2 [2019-06-25]
+
+### Bug fixes
+- Use `strings.Replace` instead of `strings.ReplaceAll` for compatibility.
+
+---
+
+## v0.33.1 [2019-06-20]
+
+### Bug fixes
+- Copy table will report if it is empty correctly.
+
+---
+
+## v0.33.0 [2019-06-18]
+
+### Breaking changes
+- Implement nulls in the compiler runtime.
+
+### Features
+- Add Go `regexp` functions to Flux.
+- Add the exists operator to the compiler runtime.
+- Implement nulls in the compiler runtime.
+- Add nullable kind.
+- Support "with" syntax for objects in row functions.
+- Port several string functions from go `strings` library to Flux.
+- Add exists unary operator.
+
+### Bug fixes
+- Add range to map_extension_with.flux.
+- Row function resets records map with each call to prepare.
+- Fix `joinStr`, including adding an EndToEnd Test.
+- Fix `string_trimLeft` and `string_trimRight` so that they pass in InfluxDB.
+- Add length check for empty tables in fill.
+
+---
+
+## v0.32.1 [2019-06-10]
+
+### Bug fixes
+- Identify memory limit exceeded errors in dispatcher.
+
+---
+
+## v0.32.0 [2019-06-05]
+
+### Breaking changes
+- Remove the control package.
+
+### Bug fixes
+- Changelog generator now handles merge commits better.
+- Return count of errors when checking AST.
+
+---
+
+## v0.31.1 [2019-05-29]
+
+### Bug fixes
+- Do not call done after calling the function.
+
+---
+
## v0.31.0 [2019-05-28]
### Breaking changes
@@ -25,6 +497,8 @@ InfluxDB until the next InfluxDB v2.0 release._
### Bug fixes
- Copy the table when a table is used multiple times.
+---
+
## v0.30.0 [2019-05-16]
### Features
@@ -39,9 +513,9 @@ InfluxDB until the next InfluxDB v2.0 release._
### Features
- Add stream table index functions (
- [`tableFind()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/tablefind/),
- [`getRecord()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/getrecord/),
- [`getColumn()`](/v2.0/reference/flux/functions/built-in/transformations/stream-table/getcolumn/)
+ [`tableFind()`](/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/tablefind/),
+ [`getRecord()`](/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getrecord/),
+ [`getColumn()`](/v2.0/reference/flux/stdlib/built-in/transformations/stream-table/getcolumn/)
).
- Construct invalid binary expressions when given multiple expressions.
diff --git a/content/v2.0/cloud/about/release-notes.md b/content/v2.0/reference/release-notes/influxdb-cloud.md
similarity index 51%
rename from content/v2.0/cloud/about/release-notes.md
rename to content/v2.0/reference/release-notes/influxdb-cloud.md
index 8737c5a6f..fac08da6c 100644
--- a/content/v2.0/cloud/about/release-notes.md
+++ b/content/v2.0/reference/release-notes/influxdb-cloud.md
@@ -1,11 +1,36 @@
---
title: InfluxDB Cloud release notes
-description: Important changes and notes introduced in each InfluxDB Cloud 2.0 update.
+description: Important changes and what's new in each InfluxDB Cloud 2.0 update.
weight: 101
menu:
- v2_0_cloud:
- parent: About InfluxDB Cloud
- name: Release notes
+ v2_0_ref:
+ parent: Release notes
+ name: InfluxDB Cloud
+aliases:
+ - /cloud/about/release-notes
+---
+
+## 2019-09-10 _Monitoring & Alerts_
+
+### Features
+- **InfluxDB 2.0 alpha-17** –
+ _See the [alpha-17 release notes](/v2.0/reference/release-notes/influxdb/#v2-0-0-alpha-17-2019-08-14) for details._
+- Alerts and Notifications to Slack (Free Tier), PagerDuty and HTTP (Pay As You Go).
+- Rate limiting on cardinality for Free Tier.
+- Billing notifications.
+- Pricing calculator.
+- Improved Signup flow.
+
+## 2019-07-23 _General Availability_
+
+### Features
+
+- **InfluxDB 2.0 alpha-15** –
+  _See the [alpha-15 release notes](/v2.0/reference/release-notes/influxdb/#v2-0-0-alpha-15-2019-07-11) for details._
+- Pay As You Go Pricing Plan.
+- Adjusted Free Plan rate limits.
+- Timezone selection in the user interface.
+
---
## 2019-05-06 _Public Beta_
@@ -28,7 +53,7 @@ menu:
### Features
- **InfluxDB 2.0 alpha-9** –
-_See the [alpha-9 release notes](/v2.0/reference/release-notes/#v2-0-0-alpha-9-2019-05-01) for details._
+ _See the [alpha-9 release notes](/v2.0/reference/release-notes/influxdb/#v2-0-0-alpha-9-2019-05-01) for details._
### Bug fixes
@@ -45,7 +70,7 @@ _See the [alpha-9 release notes](/v2.0/reference/release-notes/#v2-0-0-alpha-9-2
### Features
- **InfluxDB 2.0 alpha-7** –
-_See the [alpha-7 release notes](/v2.0/reference/release-notes/#v2-0-0-alpha-7-2019-03-28) for details._
+ _See the [alpha-7 release notes](/v2.0/reference/release-notes/influxdb/#v2-0-0-alpha-7-2019-03-28) for details._
### Bug fixes
diff --git a/content/v2.0/reference/release-notes.md b/content/v2.0/reference/release-notes/influxdb.md
similarity index 64%
rename from content/v2.0/reference/release-notes.md
rename to content/v2.0/reference/release-notes/influxdb.md
index 76489403a..0dccc200c 100644
--- a/content/v2.0/reference/release-notes.md
+++ b/content/v2.0/reference/release-notes/influxdb.md
@@ -1,15 +1,154 @@
---
title: InfluxDB v2.0 release notes
-description:
+description: Important changes and what's new in each version of InfluxDB.
menu:
v2_0_ref:
- name: Release notes
- weight: 1
+ name: InfluxDB
+ parent: Release notes
+weight: 101
+---
+## v2.0.0-alpha.18 [2019-09-26]
+
+### Features
+- Add jsonweb package for future JWT support.
+- Added the JMeter Template dashboard.
+
+### UI Improvements
+- Display dashboards index as a grid.
+- Add viewport scaling to html meta for responsive mobile scaling.
+- Remove rename and delete functionality from system buckets.
+- Prevent new buckets from being named with the reserved `_` prefix.
+- Prevent user from selecting system buckets when creating Scrapers, Telegraf configurations, read/write tokens, and when saving as a task.
+- Limit values from draggable threshold handles to 2 decimal places.
+- Redesign check builder UI to fill the screen and make more room for composing message templates.
+- Move Tokens tab from Settings to Load Data page.
+- Expose all Settings tabs in navigation menu.
+- Added Stream and table functions to query builder.
+
+### Bug Fixes
+- Remove scrollbars blocking onboarding UI step.
+
+---
+
+## v2.0.0-alpha.17 [2019-08-14]
+
+### Features
+- Optional gzip compression of the query CSV response.
+- Add task types.
+- When getting task runs from the API, runs will be returned in order of most recently scheduled first.
+
+### Bug Fixes
+- Fix authentication when updating a task with invalid org or bucket.
+- Update the documentation link for Telegraf.
+- Fix to surface errors properly as task notifications on create.
+- Fix limiting of get runs for task.
+
+---
+
+## v2.0.0-alpha.16 [2019-07-25]
+
+### Bug Fixes
+- Add link to documentation text in line protocol upload overlay.
+- Fix issue in Authorization API, can't create auth for another user.
+- Fix Influx CLI ignored user flag for auth creation.
+- Fix the map example in the documentation.
+- Ignore null/empty Flux rows which prevents a single stat/gauge crash.
+- Fixes an issue where clicking on a dashboard name caused an incorrect redirect.
+- Upgrade templates lib to 0.5.0.
+- Upgrade giraffe lib to 0.16.1.
+- Fix incorrect notification type for manually running a task.
+- Fix an issue where canceled tasks did not resume.
+
+---
+
+## v2.0.0-alpha.15 [2019-07-11]
+
+### Features
+- Add time zone support to UI.
+- Added new storage inspection tool to verify TSM files.
+
+### Bug Fixes
+- Fix incorrect reporting of tasks as successful when errors occur during result iteration.
+
+#### Known Issues
+The version of Flux included in Alpha 14 introduced `null` support.
+Most issues related to the `null` implementation have been fixed, but one known issue remains –
+The `map()` function panics if the first record processed has a `null` value.
+
+---
+
+## v2.0.0-alpha.14 [2019-06-28]
+
+### Features
+- Add `influxd inspect verify-wal` tool.
+- Move to [Flux 0.34.2](/v2.0/reference/release-notes/flux/#v0-34-2-2019-06-27) -
+ includes new string functions and initial multi-datasource support with `sql.from()`.
+- Only click save once to save cell.
+- Enable selecting more columns for line visualizations.
+
+### UI Improvements
+- Draw gauges correctly on HiDPI displays.
+- Clamp gauge position to gauge domain.
+- Improve display of error messages.
+- Remove rendering bottleneck when streaming Flux responses.
+- Prevent variable dropdown from clipping.
+
+---
+
+## v2.0.0-alpha.13 [2019-06-13]
+
+### Features
+- Add static templates for system, Docker, Redis, Kubernetes.
+
+---
+
+## v2.0.0-alpha.12 [2019-06-13]
+
+### Features
+- Enable formatting line graph y ticks with binary prefix.
+- Add x and y column pickers to graph types.
+- Add option to shade area below line graphs.
+
+### Bug Fixes
+- Fix performance regression in graph tooltips.
+
+---
+
+## v2.0.0-alpha.11 [2019-05-31]
+
+### Bug Fixes
+- Correctly check if columnKeys include xColumn in heatmap.
+
+---
+
+## v2.0.0-alpha.10 [2019-05-30]
+
+### Features
+- Add heatmap visualization type.
+- Add scatterplot graph visualization type.
+- Add description field to tasks.
+- Add CLI arguments for configuring session length and renewal.
+- Add smooth interpolation option to line graphs.
+
+### Bug Fixes
+- Removed hardcoded bucket for Getting Started with Flux dashboard.
+- Ensure map type variables allow for selecting values.
+- Generate more idiomatic Flux in query builder.
+- Expand tab key presses to two spaces in the Flux editor.
+- Prevent dragging of variable dropdowns when dragging a scrollbar inside the dropdown.
+- Improve single stat computation.
+- Fix crash when opening histogram settings with no data.
+
+### UI Improvements
+- Render checkboxes in query builder tag selection lists.
+- Fix jumbled card text in Telegraf configuration wizard.
+- Change scrapers in scrapers list to be resource cards.
+- Export and download resource with formatted resource name with no spaces.
+
---
## v2.0.0-alpha.9 [2019-05-01]
-
{{% warn %}}
**This will remove all tasks from your InfluxDB v2.0 instance.**
diff --git a/content/v2.0/reference/telegraf-plugins.md b/content/v2.0/reference/telegraf-plugins.md
new file mode 100644
index 000000000..7fc39fa29
--- /dev/null
+++ b/content/v2.0/reference/telegraf-plugins.md
@@ -0,0 +1,40 @@
+---
+title: Telegraf plugins
+description: >
+ Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+ It supports four categories of plugins including input, output, aggregator, and processor.
+ View and search all available Telegraf plugins.
+menu: v2_0_ref
+weight: 6
+---
+
+Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+It supports four categories of plugins including input, output, aggregator, and processor.
+
+- [Input plugins](#input-plugins)
+- [Output plugins](#output-plugins)
+- [Aggregator plugins](#aggregator-plugins)
+- [Processor plugins](#processor-plugins)
+
+{{< telegraf/filters >}}
+
+## Input plugins
+Telegraf input plugins are used with the InfluxData time series platform to collect
+metrics from the system, services, or third party APIs.
+
+{{< telegraf/plugins type="input" >}}
+
+## Output plugins
+Telegraf output plugins write metrics to various destinations.
+
+{{< telegraf/plugins type="output" >}}
+
+## Aggregator plugins
+Telegraf aggregator plugins create aggregate metrics (for example, mean, min, max, and quantiles).
+
+{{< telegraf/plugins type="aggregator" >}}
+
+## Processor plugins
+Telegraf processor plugins transform, decorate, and filter metrics.
+
+{{< telegraf/plugins type="processor" >}}
diff --git a/content/v2.0/security/secrets/_index.md b/content/v2.0/security/secrets/_index.md
new file mode 100644
index 000000000..7e06958bb
--- /dev/null
+++ b/content/v2.0/security/secrets/_index.md
@@ -0,0 +1,40 @@
+---
+title: Store and use secrets
+description:
+v2.0/tags: [secrets, security]
+menu:
+ v2_0:
+ parent: Security & authorization
+weight: 102
+---
+
+There are two options for storing secrets with InfluxDB:
+
+- By default, secrets are Base64-encoded and stored in the InfluxDB embedded key value store, [BoltDB](https://github.com/boltdb/bolt).
+- You can also set up Vault to store secrets. For details, see [Store secrets in Vault](/v2.0/security/secrets/use-vault).
+
+{{% cloud-msg %}}
+By default, all secrets added to InfluxDB Cloud are stored in the InfluxDB Cloud Vault cluster.
+{{% /cloud-msg %}}
+
+## Use secrets in a query
+Import the `influxdata/influxdb/secrets` package and use the `secrets.get()` function
+to populate sensitive data in queries with secrets from your secret store.
+
+```js
+import "influxdata/influxdb/secrets"
+import "sql"
+
+username = secrets.get(key: "POSTGRES_USERNAME")
+password = secrets.get(key: "POSTGRES_PASSWORD")
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://${username}:${password}@localhost",
+ query:"SELECT * FROM example-table"
+)
+```
+
+## Add, list, and delete secrets
+
+See [Manage secrets](/v2.0/security/secrets/manage-secrets).
diff --git a/content/v2.0/security/secrets/manage-secrets.md b/content/v2.0/security/secrets/manage-secrets.md
new file mode 100644
index 000000000..9b4054267
--- /dev/null
+++ b/content/v2.0/security/secrets/manage-secrets.md
@@ -0,0 +1,69 @@
+---
+title: Manage secrets
+description: Manage secrets in InfluxDB with the InfluxDB API.
+v2.0/tags: [secrets, security]
+menu:
+ v2_0:
+ parent: Store and use secrets
+weight: 201
+---
+
+
+Manage secrets using the InfluxDB `/org/{orgID}/secrets` API endpoint.
+All secrets belong to an organization and are stored in your [secret-store](/v2.0/security/secrets/).
+Include your [organization ID](/v2.0/organizations/view-orgs/#view-your-organization-id)
+and [authentication token](/v2.0/security/tokens/view-tokens/) with each request.
+
+### Add a secret
+Use the `PATCH` request method to add a new secret to your organization.
+Pass the secret key-value pair in the request body.
+
+```sh
+curl -XPATCH http://localhost:9999/api/v2/orgs/<org-id>/secrets \
+  -H 'authorization: Token YOURAUTHTOKEN' \
+  -H 'Content-type: application/json' \
+  --data '{
+  "<secret-key>": "<secret-value>"
+}'
+```
+
+### View secret keys
+Use the `GET` request method to view your organization's secrets keys.
+
+```sh
+curl -XGET http://localhost:9999/api/v2/orgs/<org-id>/secrets \
+ -H 'authorization: Token YOURAUTHTOKEN'
+```
+
+### Delete a secret
+Use the `POST` request method and the `orgs/{orgID}/secrets/delete` API endpoint
+to delete one or more secrets.
+Include an array of secret keys to delete in the request body in the following format.
+
+```bash
+curl -XPOST http://localhost:9999/api/v2/orgs/<org-id>/secrets/delete \
+  -H 'authorization: Token YOURAUTHTOKEN' \
+  --data '{
+  "secrets": [
+    "<secret-key>"
+  ]
+}'
+```
+
+## Use secrets in a query
+Import the `influxdata/influxdb/secrets` package and use the `secrets.get()` function
+to populate sensitive data in queries with secrets from your secret store.
+
+```js
+import "influxdata/influxdb/secrets"
+import "sql"
+
+username = secrets.get(key: "POSTGRES_USERNAME")
+password = secrets.get(key: "POSTGRES_PASSWORD")
+
+sql.from(
+ driverName: "postgres",
+ dataSourceName: "postgresql://${username}:${password}@localhost",
+ query:"SELECT * FROM example-table"
+)
+```
diff --git a/content/v2.0/security/secrets/use-vault.md b/content/v2.0/security/secrets/use-vault.md
new file mode 100644
index 000000000..1f6303fda
--- /dev/null
+++ b/content/v2.0/security/secrets/use-vault.md
@@ -0,0 +1,72 @@
+---
+title: Store secrets in Vault
+description: Store secrets in Vault and manage them through the InfluxDB API.
+v2.0/tags: [secrets, security]
+menu:
+ v2_0:
+ parent: Store and use secrets
+weight: 201
+---
+
+[Vault](https://www.vaultproject.io/) secures, stores, and tightly controls access
+to tokens, passwords, certificates, and other sensitive secrets.
+Store sensitive secrets in Vault using the InfluxDB built-in Vault integration.
+
+{{% cloud-msg %}}
+By default, all secrets added to InfluxDB Cloud are stored in the InfluxDB Cloud Vault cluster.
+{{% /cloud-msg %}}
+
+## Start a Vault server
+
+Start a Vault server and ensure InfluxDB has network access to the server.
+
+The following links provide information about running Vault in both development and production:
+
+- [Install Vault](https://learn.hashicorp.com/vault/getting-started/install)
+- [Start a Vault dev server](https://learn.hashicorp.com/vault/getting-started/dev-server)
+- [Deploy Vault](https://learn.hashicorp.com/vault/getting-started/deploy)
+
+{{% note %}}
+InfluxDB supports the [Vault KV Secrets Engine Version 2 API](https://www.vaultproject.io/api/secret/kv/kv-v2.html) only.
+When you create a secrets engine, enable the `kv-v2` version by running:
+
+```sh
+vault secrets enable kv-v2
+```
+{{% /note %}}
+
+For this example, install Vault on your local machine and start a Vault dev server.
+
+```sh
+vault server -dev
+```
+
+## Define Vault environment variables
+
+Use [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables)
+to provide connection credentials and other important Vault-related information to InfluxDB.
+
+#### Required environment variables
+
+- `VAULT_ADDR`: The API address of your Vault server _(provided in the Vault server output)_.
+- `VAULT_TOKEN`: The [Vault token](https://learn.hashicorp.com/vault/getting-started/authentication)
+ required to access your Vault server.
+
+_Your Vault server configuration may require other environment variables._
+
+```sh
+export VAULT_ADDR='http://127.0.0.1:8200' VAULT_TOKEN='s.0X0XxXXx0xXxXXxxxXxXxX0x'
+```
+
+## Start InfluxDB
+
+Start the [`influxd` service](/v2.0/reference/cli/influxd/) with the `--secret-store`
+option set to `vault`.
+
+```bash
+influxd --secret-store vault
+```
+
+## Manage tokens through the InfluxDB API
+Use the InfluxDB `/org/{orgID}/secrets` API endpoint to add tokens to Vault.
+For details, see [Manage secrets](/v2.0/security/secrets/manage-secrets/).
diff --git a/content/v2.0/security/tokens/create-token.md b/content/v2.0/security/tokens/create-token.md
index d364d99bd..0352c5a6c 100644
--- a/content/v2.0/security/tokens/create-token.md
+++ b/content/v2.0/security/tokens/create-token.md
@@ -16,9 +16,9 @@ command line interface (CLI).
## Create a token in the InfluxDB UI
-1. Click the **Settings** icon in the navigation bar.
+1. Click the **Load Data** icon in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "disks" >}}
2. Click **Tokens**.
3. Click the **+ Generate** dropdown in the upper right and select a token type (**Read/Write Token** or **All Access Token**).
diff --git a/content/v2.0/security/tokens/delete-token.md b/content/v2.0/security/tokens/delete-token.md
index 13083063a..b96e308af 100644
--- a/content/v2.0/security/tokens/delete-token.md
+++ b/content/v2.0/security/tokens/delete-token.md
@@ -17,9 +17,9 @@ have access to your InfluxDB instance.
## Delete tokens in the InfluxDB UI
-1. Click the **Settings** icon in the navigation bar.
+1. Click the **Load Data** icon in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "disks" >}}
2. Click **Tokens**. All of your account's tokens appear.
3. Hover over the token you want to delete and click **Delete** and **Confirm**.
diff --git a/content/v2.0/security/tokens/update-tokens.md b/content/v2.0/security/tokens/update-tokens.md
index eb1b86533..b566c8809 100644
--- a/content/v2.0/security/tokens/update-tokens.md
+++ b/content/v2.0/security/tokens/update-tokens.md
@@ -15,9 +15,9 @@ Update an authentication token's description using the InfluxDB user interface (
## Update a token in the InfluxDB UI
-1. Click the **Settings** icon in the navigation bar.
+1. Click the **Load Data** icon in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "disks" >}}
2. Click **Tokens**. All of your account's tokens appear.
3. Click the pencil icon {{< icon "pencil" >}} next to the token's name in the **Description** column.
diff --git a/content/v2.0/security/tokens/view-tokens.md b/content/v2.0/security/tokens/view-tokens.md
index 6dc3537e9..3e5fe9d22 100644
--- a/content/v2.0/security/tokens/view-tokens.md
+++ b/content/v2.0/security/tokens/view-tokens.md
@@ -16,9 +16,9 @@ command line interface (CLI).
## View tokens in the InfluxDB UI
-1. Click the **Settings** icon in the navigation bar.
+1. Click the **Load Data** icon in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "disks" >}}
2. Click **Tokens**. All of your account's tokens appear.
3. Click on a token name from the list to view the token and a summary of access permissions.
diff --git a/content/v2.0/security/use-vault.md b/content/v2.0/security/use-vault.md
deleted file mode 100644
index 5fc8bc3bd..000000000
--- a/content/v2.0/security/use-vault.md
+++ /dev/null
@@ -1,129 +0,0 @@
----
-title: Store secrets in Vault
-description: Manage authentication tokens in InfluxDB using the InfluxDB UI or the influx CLI.
-v2.0/tags: [tokens, security]
-menu:
- v2_0:
- parent: Security & authorization
-weight: 102
----
-
-[Vault](https://www.vaultproject.io/) secures, stores, and tightly controls access
-to tokens, passwords, certificates, and other sensitive secrets.
-Store sensitive secrets in Vault using the InfluxDB built-in Vault integration.
-
-{{% note %}}
-When not using Vault, secrets are Base64-encoded and stored in the InfluxDB embedded key value store,
-[BoltDB](https://github.com/boltdb/bolt).
-{{% /note %}}
-
-## Start a Vault server
-Start a Vault server and ensure InfluxDB has network access to the server.
-The following links provide information about running Vault in both development and production:
-
-- [Install Vault](https://learn.hashicorp.com/vault/getting-started/install)
-- [Start a Vault dev server](https://learn.hashicorp.com/vault/getting-started/dev-server)
-- [Deploy Vault](https://learn.hashicorp.com/vault/getting-started/deploy)
-
-For this example, install Vault on your local machine and start a Vault dev server.
-
-```sh
-vault server -dev
-```
-
-## Define Vault environment variables
-Use [Vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables)
-to provide connection credentials and other important Vault-related information to InfluxDB.
-
-#### Required environment variables
-- `VAULT_ADDR`: The API address of your Vault server _(provided in the Vault server output)_.
-- `VAULT_TOKEN`: The [Vault token](https://learn.hashicorp.com/vault/getting-started/authentication)
- required to access your Vault server.
-
-_Your Vault server configuration may require other environment variables._
-
-```sh
-export VAULT_ADDR='http://127.0.0.1:8200' VAULT_TOKEN='s.0X0XxXXx0xXxXXxxxXxXxX0x'
-```
-
-## Start InfluxDB
-Start the [`influxd` service](/v2.0/reference/cli/influxd/) with the `--secret-store`
-option set to `vault`.
-
-```bash
-influxd --secret-store vault
-```
-
-## Test Vault storage
-With Vault and InfluxDB servers running, use the InfluxDB API to test Vault:
-
-{{% note %}}
-Replace `` with your [organization ID](/v2.0/organizations/view-orgs/#view-your-organization-id)
-and `YOURAUTHTOKEN` with your [InfluxDB authentication token](/v2.0/security/tokens/).
-{{% /note %}}
-
-##### Retrieve an organization's secrets
-```sh
-curl --request GET \
- --url http://localhost:9999/api/v2/orgs//secrets \
- --header 'authorization: Token YOURAUTHTOKEN'
-
-# should return
-# {
-# "links": {
-# "org": "/api/v2/orgs/031c8cbefe101000",
-# "secrets": "/api/v2/orgs/031c8cbefe101000/secrets"
-# },
-# "secrets": []
-# }
-```
-
-##### Add secrets to an organization
-```sh
-curl --request PATCH \
- --url http://localhost:9999/api/v2/orgs//secrets \
- --header 'authorization: Token YOURAUTHTOKEN' \
- --header 'content-type: application/json' \
- --data '{
- "foo": "bar",
- "hello": "world"
-}'
-
-# should return 204 no content
-```
-
-##### Retrieve the added secrets
-```bash
-curl --request GET \
- --url http://localhost:9999/api/v2/orgs//secrets \
- --header 'authorization: Token YOURAUTHTOKEN'
-
-# should return
-# {
-# "links": {
-# "org": "/api/v2/orgs/031c8cbefe101000",
-# "secrets": "/api/v2/orgs/031c8cbefe101000/secrets"
-# },
-# "secrets": [
-# "foo",
-# "hello"
-# ]
-# }
-```
-
-## Vault secrets storage
-For each organization, InfluxDB creates a [secrets engine](https://learn.hashicorp.com/vault/getting-started/secrets-engines)
-using the following pattern:
-
-```
-/secret/data/
-```
-
-Secrets are stored in Vault as key value pairs in their respective secrets engines.
-
-```
-/secret/data/031c8cbefe101000 ->
- this_key: foo
- that_key: bar
- a_secret: key
-```
diff --git a/content/v2.0/visualize-data/_index.md b/content/v2.0/visualize-data/_index.md
index 5ee95a4ed..9888843a9 100644
--- a/content/v2.0/visualize-data/_index.md
+++ b/content/v2.0/visualize-data/_index.md
@@ -7,7 +7,7 @@ v2.0/tags: [visualize]
menu:
v2_0:
name: Visualize data
-weight: 4
+weight: 5
---
The InfluxDB user interface (UI) provides tools for building custom dashboards to visualize your data.
diff --git a/content/v2.0/visualize-data/dashboards/control-dashboard.md b/content/v2.0/visualize-data/dashboards/control-dashboard.md
new file mode 100644
index 000000000..b3101e639
--- /dev/null
+++ b/content/v2.0/visualize-data/dashboards/control-dashboard.md
@@ -0,0 +1,61 @@
+---
+title: Control a dashboard
+seotitle: Control an InfluxDB dashboard
+description: Control an InfluxDB dashboard in the InfluxDB user interface (UI).
+v2.0/tags: [dashboards]
+menu:
+ v2_0:
+ name: Control a dashboard
+ parent: Manage dashboards
+weight: 203
+---
+
+## Control at the dashboard level
+
+Use dashboard controls in the upper right to update your dashboard.
+
+### Add a cell
+
+Click {{< icon "add-cell" >}} **Add Cell** to open the Data Explorer and configure a new cell for your dashboard.
+
+For details on using the Data Explorer, see [Explore metrics](/v2.0/visualize-data/explore-metrics/).
+
+### Add a note
+
+1. Click {{< icon "note" >}} **Add Note** to add a note cell to your dashboard.
+2. Enter your note in Markdown in the left pane. A preview appears in the right pane.
+3. Enable the **Show note when query returns no data** option to show the note only when the query displays no data.
+4. Click **Save**.
+
+### Select timezone
+
+Click the timezone dropdown to select a timezone to use for the dashboard. Select either the local time (default) or UTC.
+
+{{< img-hd src="/img/timezone.png" alt="Select timezone" />}}
+
+### Select auto-refresh interval
+
+Select how frequently to refresh the dashboard's data. By default, refreshing is paused.
+
+{{< img-hd src="/img/refresh-interval.png" alt="Select refresh interval" />}}
+
+### Manually refresh dashboard
+
+Click the refresh button ({{< icon "refresh" >}}) to manually refresh the dashboard's data.
+
+### Select time range
+
+1. Select from the time range options in the dropdown menu.
+
+{{< img-hd src="/img/time-range.png" alt="Select time range" />}}
+
+2. Select **Custom Time Range** to enter a custom time range with precision up to nanoseconds.
+The default time range is 5 minutes.
+
+### Add variables
+
+Click **Variables** to display variables available for your dashboard. For details, see [Use and manage variables](/v2.0/visualize-data/variables/)
+
+### Presentation mode
+
+Click the fullscreen icon ({{< icon "fullscreen" >}}) to enter presentation mode. Presentation mode allows you to view a dashboard in full screen, hiding the left and top navigation menus so only the cells appear. This mode might be helpful, for example, for stationary screens dedicated to monitoring visualizations.
diff --git a/content/v2.0/visualize-data/dashboards/create-dashboard.md b/content/v2.0/visualize-data/dashboards/create-dashboard.md
index d9109a30a..14ee507a8 100644
--- a/content/v2.0/visualize-data/dashboards/create-dashboard.md
+++ b/content/v2.0/visualize-data/dashboards/create-dashboard.md
@@ -12,27 +12,17 @@ menu:
weight: 201
---
-## Create a dashboard
+## Create a new dashboard
-**To create a new dashboard**:
1. Click the **Dashboards** icon in the navigation bar.
- {{< nav-icon "dashboards" >}}
+ {{< nav-icon "dashboards" >}}
2. Click the **+Create Dashboard** menu in the upper right and select **New Dashboard**.
3. Enter a name for your dashboard in the **Name this dashboard** field in the upper left.
-**To create a dashboard from a template in the templates UI**:
-
-1. Click the **Settings** icon in the left navigation.
-
- {{< nav-icon "settings" >}}
-
-2. Select the **Templates** tab.
-3. Hover over the name of the template you want to create a dashboard from, then click **Create**.
-
**To import an existing dashboard**:
1. Click the **Dashboards** icon in the navigation bar.
@@ -64,15 +54,18 @@ weight: 201
{{< nav-icon "settings" >}}
2. Select the **Templates** tab.
+
+ - In the **Static Templates** tab, a list of pre-created templates appears.
+ - In the **User Templates** tab, a list of custom user-created templates appears.
+
3. Hover over the name of the template you want to create a dashboard from, then click **Create**.
+
## Clone a dashboard
1. Hover over the dashboard name in the list of dashboard to show options.
2. Click **Clone**. The cloned dashboard opens.
- 
-
#### Add data to your dashboard
diff --git a/content/v2.0/visualize-data/dashboards/delete-dashboard.md b/content/v2.0/visualize-data/dashboards/delete-dashboard.md
index 0aa1a60db..1709d3175 100644
--- a/content/v2.0/visualize-data/dashboards/delete-dashboard.md
+++ b/content/v2.0/visualize-data/dashboards/delete-dashboard.md
@@ -13,9 +13,7 @@ To delete a dashboard from the InfluxDB user interface (UI):
1. Hover over the dashboard name in the list of dashboards to show options.
2. Click **{{< icon "trash" >}}**.
-3. Click **Confirm** to delete your dashboard.
-
- 
+3. Click **Delete** to delete your dashboard.
{{% warn %}}
Deleting a dashboard cannot be undone.
diff --git a/content/v2.0/visualize-data/explore-metrics.md b/content/v2.0/visualize-data/explore-metrics.md
index 87da02a94..5156061f8 100644
--- a/content/v2.0/visualize-data/explore-metrics.md
+++ b/content/v2.0/visualize-data/explore-metrics.md
@@ -28,7 +28,7 @@ See [Get started with Flux](/v2.0/query-data/get-started) to learn more about Fl
1. Click the **Data Explorer** icon in the sidebar.
- {{< nav-icon "data-explorer" >}}
+ {{< nav-icon "data-explorer" >}}
2. Use the Flux builder in the bottom panel to select a bucket and filters such as measurement, field or tag.
Alternatively, click **Script Editor** to manually edit the query.
@@ -41,6 +41,7 @@ See [Get started with Flux](/v2.0/query-data/get-started) to learn more about Fl
* Click on the name of the query in the tab to rename it.
## Visualize your query
+
Select from available [visualization types](/v2.0/visualize-data/visualization-types/) or enable the **View Raw Data** option to view all of your query's results.
1. Select a visualization type from the dropdown menu in the upper-left.
@@ -51,6 +52,43 @@ Select from available [visualization types](/v2.0/visualize-data/visualization-t
For details about all of the available visualization options, see
[Visualization types](/v2.0/visualize-data/visualization-types/).
+## Control your dashboard cell
+
+From the cell editor overlay, use the controls in the lower pane to control your dashboard.
+
+### View raw data
+
+Toggle the **View Raw Data** {{< icon "toggle" >}} option to see your data in table format instead of a graph. Use this option when data can't be visualized using a visualization type.
+
+ {{< img-hd src="/img/view-raw-data.png" alt="View raw data" />}}
+
+### Save as CSV
+
+Click the CSV icon to save the cell's contents as a CSV file.
+
+### Select auto-refresh interval
+
+Select how frequently to refresh the dashboard's data. By default, refreshing is paused.
+
+{{< img-hd src="/img/refresh-interval.png" alt="Select refresh interval" />}}
+
+### Manually refresh dashboard
+
+Click the refresh button ({{< icon "refresh" >}}) to manually refresh the dashboard's data.
+
+### Select time range
+
+1. Select from the time range options in the dropdown menu.
+
+ {{< img-hd src="/img/time-range.png" alt="Select time range" />}}
+
+2. Select **Custom Time Range** to enter a custom time range with precision up to nanoseconds.
+The default time range is 5 minutes.
+
+### Query Builder or Script Editor
+
+Click **Query Builder** to use the builder to create a Flux query. Click **Script Editor** to manually edit the query.
+
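+For example, a minimal query you might enter in the Script Editor
+(the bucket, measurement, and field names below are placeholders):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+  |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_system")
+```
+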
## Save your query as a dashboard cell or task
**To save your query**:
diff --git a/content/v2.0/visualize-data/sources.md b/content/v2.0/visualize-data/sources.md
index f2ed22959..cb08903c0 100644
--- a/content/v2.0/visualize-data/sources.md
+++ b/content/v2.0/visualize-data/sources.md
@@ -14,11 +14,11 @@ Like dashboards and buckets, data sources are scoped by organization. When you f
**To add data to a bucket**:
-1. Click in the **Organizations** icon in the navigation bar.
+1. Click the **Settings** tab in the navigation bar.
- {{< nav-icon "orgs" >}}
+ {{< nav-icon "settings" >}}
-2. Select the **Buckets** tab.
+2. Click the **Buckets** tab.
3. Next to the name of a bucket, click **Add Data**.
4. Select **Streaming**, **Line Protocol**, or **Scraping** from the data source options.
5. Click **Continue**.
@@ -26,15 +26,16 @@ Like dashboards and buckets, data sources are scoped by organization. When you f
**To manage Telegraf configurations**:
-1. Click in the **Organizations** icon in the navigation bar.
+1. Click the **Settings** tab in the navigation bar.
- {{< nav-icon "orgs" >}}
+ {{< nav-icon "settings" >}}
-2. Select the **Telegraf** tab. A list of existing Telegraf configurations appears.
+2. Click the **Telegraf** tab. A list of existing Telegraf configurations appears.
3. To add a new Telegraf configuration:
* Click **Create Configuration** in the upper right.
* Select the Telegraf plugins you want to use to collect data.
* Click **Continue**.
* Follow the instructions on the **Listen for Streaming Data** page that appears to complete your configuration.
4. To delete a Telegraf configuration, hover over its name in the list and click **Delete**.
-5. To view details of a Telegraf configuration, hover over its name in the list and click **Download Config**.
+5. To view details of a Telegraf configuration, click on its name in the list.
+ - To save the Telegraf configuration, click **Download Config**.
diff --git a/content/v2.0/visualize-data/templates/view-template.md b/content/v2.0/visualize-data/templates/view-template.md
index 3803d3a4d..5a7346c9a 100644
--- a/content/v2.0/visualize-data/templates/view-template.md
+++ b/content/v2.0/visualize-data/templates/view-template.md
@@ -16,5 +16,190 @@ To view templates in the InfluxDB UI:
{{< nav-icon "settings" >}}
-2. Select the **Templates** tab. The list of templates appears.
+2. Select the **Templates** tab.
+
+ - In the **Static Templates** tab, a list of pre-created templates appears. For a list of static templates, see [Static templates](#static-templates) below.
+ - In the **User Templates** tab, a list of custom user-created templates appears.
+
3. Click on the name of a template to view its JSON.
+
+## Static templates
+The following Telegraf-related dashboard templates are available:
+
+- [Docker](#docker)
+- [Getting Started with Flux](#getting-started-with-flux)
+- [Kubernetes](#kubernetes)
+- [Local Metrics](#local-metrics)
+- [Nginx](#nginx)
+- [Redis](#redis)
+- [System](#system)
+
+### Docker
+The Docker dashboard template contains an overview of Docker metrics. It displays the following information:
+
+- System Uptime
+- nCPUs
+- System Load
+- Total Memory
+- Memory Usage
+- Disk Usage
+- CPU Usage
+- System Load
+- Swap
+- Number of Docker containers
+- CPU usage per container
+- Memory usage % per container
+- Memory usage per container
+- Network TX traffic per container/sec
+- Network RX traffic per container/sec
+- Disk I/O read per container/sec
+- Disk I/O write per container/sec
+
+
+#### Plugins
+
+- [`cpu` plugin](/v2.0/reference/telegraf-plugins/#cpu)
+- [`disk` plugin](/v2.0/reference/telegraf-plugins/#disk)
+- [`diskio` plugin](/v2.0/reference/telegraf-plugins/#diskio)
+- [`docker` plugin](/v2.0/reference/telegraf-plugins/#docker)
+- [`mem` plugin](/v2.0/reference/telegraf-plugins/#mem)
+- [`swap` plugin](/v2.0/reference/telegraf-plugins/#swap)
+- [`system` plugin](/v2.0/reference/telegraf-plugins/#system)
+
+### Getting Started with Flux
+This dashboard is designed to get you started with the Flux language. It contains explanations and visualizations for a series of increasingly complex example Flux queries.
+
+- Creating your first basic Flux query
+- Filtering data using the `filter` function
+- Windowing data with the `window` function
+- Aggregating data with the `aggregateWindow` function
+- Multiple aggregates using Flux variables and the `yield` function
+- Joins and maps with the `join`, `map`, `group`, and `drop` functions
+
+#### Plugins
+
+- [`cpu` plugin](/v2.0/reference/telegraf-plugins/#cpu)
+- [`disk` plugin](/v2.0/reference/telegraf-plugins/#disk)
+
+### Kubernetes
+
+The Kubernetes dashboard gives a visual overview of Kubernetes metrics. It displays the following information:
+
+- Allocatable Memory
+- Running Pods
+- Running Containers
+- K8s Node Capacity CPUs
+- K8s Node Allocatable CPUs
+- DaemonSet
+- Capacity Pods
+- Allocatable Pods
+- Resource Requests CPU
+- Resource Limit milliscpu
+- Resource Memory
+- Node Memory
+- Replicas Available
+- Persistent Volumes Status
+- Running Containers
+
+#### Plugins
+- [`kubernetes` plugin](/v2.0/reference/telegraf-plugins/)
+
+### Local Metrics
+The Local Metrics dashboard shows a visual overview of some of the metrics available from the Local Metrics endpoint located at `/metrics`. It displays the following information:
+
+- Uptime
+- Instance Info
+- # of Orgs
+- # of Users
+- # of Buckets
+- # of Tokens
+- # of Telegraf configurations
+- # of Dashboards
+- # of Scrapers
+- # of Tasks
+- Local Object Store IO
+- Memory Allocations (Bytes)
+- Memory Usage (%)
+- Memory Allocs & Frees (Bytes)
+
+### Nginx
+The Nginx dashboard gives a visual overview of Nginx metrics. It displays the following information:
+
+- System Uptime
+- nCPUs
+- System Load
+- Total Memory
+- Memory Usage
+- Disk Usage
+- CPU Usage
+- System Load
+- Swap
+- Nginx active connections
+- Nginx reading: writing/waiting
+- Nginx requests & connections/min
+- Network
+
+
+#### Plugins
+- [`cpu` plugin](/v2.0/reference/telegraf-plugins/#cpu)
+- [`disk` plugin](/v2.0/reference/telegraf-plugins/#disk)
+- [`diskio` plugin](/v2.0/reference/telegraf-plugins/#diskio)
+- [`mem` plugin](/v2.0/reference/telegraf-plugins/#mem)
+- [`nginx` plugin](/v2.0/reference/telegraf-plugins/#nginx)
+- [`swap` plugin](/v2.0/reference/telegraf-plugins/#swap)
+- [`system` plugin](/v2.0/reference/telegraf-plugins/#system)
+
+### Redis
+The Redis dashboard gives a visual overview of Redis metrics. It displays the following information:
+
+- System Uptime
+- nCPUs
+- System Load
+- Total Memory
+- Memory Usage
+- Disk Usage
+- CPU Usage
+- System Load
+- Swap
+- Redis used memory
+- Redis CPU
+- Redis # commands processed per sec
+- Redis evicted/expired keys
+- Redis connected slaves
+- Keyspace hitrate
+- Redis - Network Input/Output
+- Redis connections
+- Redis uptime
+
+#### Plugins
+- [`cpu` plugin](/v2.0/reference/telegraf-plugins/#cpu)
+- [`disk` plugin](/v2.0/reference/telegraf-plugins/#disk)
+- [`mem` plugin](/v2.0/reference/telegraf-plugins/#mem)
+- [`redis` plugin](/v2.0/reference/telegraf-plugins/#redis)
+- [`swap` plugin](/v2.0/reference/telegraf-plugins/#swap)
+- [`system` plugin](/v2.0/reference/telegraf-plugins/#system)
+
+
+### System
+The System dashboard gives a visual overview of system metrics. It displays the following information:
+
+- System Uptime
+- nCPUs
+- System Load
+- Total Memory
+- Memory Usage
+- Disk Usage
+- CPU Usage
+- System Load
+- Disk IO
+- Network
+- Processes
+- Swap
+
+#### Plugins
+- [`disk` plugin](/v2.0/reference/telegraf-plugins/#disk)
+- [`diskio` plugin](/v2.0/reference/telegraf-plugins/#diskio)
+- [`mem` plugin](/v2.0/reference/telegraf-plugins/#mem)
+- [`net` plugin](/v2.0/reference/telegraf-plugins/#net)
+- [`swap` plugin](/v2.0/reference/telegraf-plugins/#swap)
+- [`system` plugin](/v2.0/reference/telegraf-plugins/#system)
diff --git a/content/v2.0/visualize-data/variables/_index.md b/content/v2.0/visualize-data/variables/_index.md
index 58c6529d8..2ee717f8f 100644
--- a/content/v2.0/visualize-data/variables/_index.md
+++ b/content/v2.0/visualize-data/variables/_index.md
@@ -40,14 +40,14 @@ The InfluxDB user interface (UI) provides the following predefined dashboard var
#### v.timeRangeStart
Specifies the beginning of the queried time range.
-This variable is typically used to define the [`start` parameter](/v2.0/reference/flux/functions/built-in/transformations/range#start)
+This variable is typically used to define the [`start` parameter](/v2.0/reference/flux/stdlib/built-in/transformations/range#start)
of the `range()` function.
The **Time Range** selector defines the value of this variable.
#### v.timeRangeStop
Specifies the end of the queried time range.
-This variable is typically used to define the [`stop` parameter](/v2.0/reference/flux/functions/built-in/transformations/range#stop)
+This variable is typically used to define the [`stop` parameter](/v2.0/reference/flux/stdlib/built-in/transformations/range#stop)
of the `range()` function.
The **Time Range** selector defines the value of this variable.
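+
+For example, a cell query typically passes both variables directly to the
+`range()` function. A minimal sketch (the bucket name is a placeholder):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+```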
@@ -56,11 +56,12 @@ It defaults to `now`.
#### v.windowPeriod
Specifies the period of windowed data.
This variable is typically used to define the `every` or `period` parameters of the
-[`window()` function](/v2.0/reference/flux/functions/built-in/transformations/window)
+[`window()` function](/v2.0/reference/flux/stdlib/built-in/transformations/window)
in data aggregation operations.
-The value of this variable is calculated by dividing the total time within the displayed
-time range by the dashboard refresh interval (defined by the **Refresh** dropdown).
+The value of this variable is calculated by analyzing the duration of the Flux
+query it is used within. Queries that fetch data from a longer time range will
+have a larger `v.windowPeriod` duration.
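+
+For example, passing `v.windowPeriod` to `aggregateWindow()` lets the window size
+scale with the queried time range. A sketch (bucket, measurement, and field are
+placeholders):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+  |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+  |> aggregateWindow(every: v.windowPeriod, fn: mean)
+```
+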
## Custom dashboard variables
Create, manage, and use custom dashboard variables in the InfluxDB user interface (UI).
diff --git a/content/v2.0/visualize-data/variables/common-variables.md b/content/v2.0/visualize-data/variables/common-variables.md
index 2f8312f92..77d57e749 100644
--- a/content/v2.0/visualize-data/variables/common-variables.md
+++ b/content/v2.0/visualize-data/variables/common-variables.md
@@ -13,9 +13,9 @@ weight: 208
List all buckets in the current organization.
_**Flux functions:**
-[buckets()](/v2.0/reference/flux/functions/built-in/inputs/buckets/),
-[rename()](/v2.0/reference/flux/functions/built-in/transformations/rename/),
-[keep()](/v2.0/reference/flux/functions/built-in/transformations/keep/)_
+[buckets()](/v2.0/reference/flux/stdlib/built-in/inputs/buckets/),
+[rename()](/v2.0/reference/flux/stdlib/built-in/transformations/rename/),
+[keep()](/v2.0/reference/flux/stdlib/built-in/transformations/keep/)_
```js
buckets()
@@ -26,8 +26,8 @@ buckets()
## List measurements
List all measurements in a specified bucket.
-_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/functions/influxdb-v1/)
-**Flux functions:** [v1.measurements()](/v2.0/reference/flux/functions/influxdb-v1/measurements/)_
+_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/stdlib/influxdb-v1/)
+**Flux functions:** [v1.measurements()](/v2.0/reference/flux/stdlib/influxdb-v1/measurements/)_
```js
import "influxdata/influxdb/v1"
@@ -37,8 +37,8 @@ v1.measurements(bucket: "bucket-name")
## List fields in a measurement
List all fields in a specified bucket and measurement.
-_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/functions/influxdb-v1/)
-**Flux functions:** [v1.measurementTagValues()](/v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues/)_
+_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/stdlib/influxdb-v1/)
+**Flux functions:** [v1.measurementTagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagvalues/)_
```js
import "influxdata/influxdb/v1"
@@ -49,11 +49,12 @@ v1.measurementTagValues(
)
```
-## List hosts
-List all `host` tag values in a specified bucket.
+## List unique tag values
+List all unique tag values for a specific tag in a specified bucket.
+The example below lists all unique values of the `host` tag.
-_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/functions/influxdb-v1/)_
-_**Flux functions:** [v1.measurements()](/v2.0/reference/flux/functions/influxdb-v1/measurements/)_
+_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/stdlib/influxdb-v1/)_
+_**Flux functions:** [v1.tagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues/)_
```js
import "influxdata/influxdb/v1"
@@ -64,8 +65,8 @@ v1.tagValues(bucket: "bucket-name", tag: "host")
List all Docker containers when using the Docker Telegraf plugin.
_**Telegraf plugin:** [Docker](https://docs.influxdata.com/telegraf/latest/plugins/inputs/#docker)_
-_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/functions/influxdb-v1/)_
-_**Flux functions:** [v1.tagValues()](/v2.0/reference/flux/functions/influxdb-v1/tagvalues/)_
+_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/stdlib/influxdb-v1/)_
+_**Flux functions:** [v1.tagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/tagvalues/)_
```js
import "influxdata/influxdb/v1"
@@ -76,8 +77,8 @@ v1.tagValues(bucket: "bucket-name", tag: "container_name")
List all Kubernetes pods when using the Kubernetes Telegraf plugin.
_**Telegraf plugin:** [Kubernetes](https://docs.influxdata.com/telegraf/latest/plugins/inputs/#kubernetes)_
-_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/functions/influxdb-v1/)_
-_**Flux functions:** [v1.measurementTagValues()](/v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues/)_
+_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/stdlib/influxdb-v1/)_
+_**Flux functions:** [v1.measurementTagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagvalues/)_
```js
import "influxdata/influxdb/v1"
@@ -92,8 +93,8 @@ v1.measurementTagValues(
List all Kubernetes nodes when using the Kubernetes Telegraf plugin.
_**Telegraf plugin:** [Kubernetes](https://docs.influxdata.com/telegraf/latest/plugins/inputs/#kubernetes)_
-_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/functions/influxdb-v1/)_
-_**Flux functions:** [v1.measurementTagValues()](/v2.0/reference/flux/functions/influxdb-v1/measurementtagvalues/)_
+_**Flux package:** [InfluxDB v1](/v2.0/reference/flux/stdlib/influxdb-v1/)_
+_**Flux functions:** [v1.measurementTagValues()](/v2.0/reference/flux/stdlib/influxdb-v1/measurementtagvalues/)_
```js
import "influxdata/influxdb/v1"
diff --git a/content/v2.0/visualize-data/variables/create-variable.md b/content/v2.0/visualize-data/variables/create-variable.md
index 6abe0cd0f..2609fe97b 100644
--- a/content/v2.0/visualize-data/variables/create-variable.md
+++ b/content/v2.0/visualize-data/variables/create-variable.md
@@ -9,53 +9,58 @@ weight: 201
"v2.0/tags": [variables]
---
-Create dashboard variables in the Data Explorer, from the Organization page, or import a variable.
+Create dashboard variables in the Data Explorer, from the Settings section, or import a variable.
**Variable names must be unique.**
+There are multiple variable types that provide different means of populating your list of variable values.
_For information about variable types, see [Variable types](/v2.0/visualize-data/variables/variable-types/)._
-### Create a variable in the Data Explorer
+## Create a variable in the Data Explorer
{{% note %}}
-Only [Query variables](/v2.0/visualize-data/variables/variable-types/#query)
-can be created from the Data Explorer.
+InfluxData recommends using the Data Explorer to create
+[Query dashboard variables](/v2.0/visualize-data/variables/variable-types/#query).
+The [Table visualization type](/v2.0/visualize-data/visualization-types/table/) and
+**View Raw Data** option provide human-readable query results.
{{% /note %}}
1. Click the **Data Explorer** icon in the sidebar.
{{< nav-icon "data-explorer" >}}
-2. Click **Script Editor** on the lower right.
-3. Build the query for your variable using the [Table visualization type](/v2.0/visualize-data/visualization-types/#table) or enable the **View Raw Data** option.
+2. Use the **Query Builder** or **Script Editor** to build a query.
+3. Use the [Table visualization type](/v2.0/visualize-data/visualization-types/table/)
+ or enable the **View Raw Data** option to view human-readable query results.
4. Click **Save As** in the upper right.
5. In the window that appears, select **Variable**.
6. Enter a name for your variable in the **Name** field.
7. Click **Create**.
-### Create a variable in the configuration page
+_For information about common Query variables, see [Common variable queries](/v2.0/visualize-data/variables/common-variables/)._
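+
+For example, a Query variable that lists all buckets in the organization can be
+built from the `buckets()`, `rename()`, and `keep()` functions, roughly as follows:
+
+```js
+buckets()
+  |> rename(columns: {name: "_value"})
+  |> keep(columns: ["_value"])
+```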
+
+## Create a variable in the Settings section
1. Click the **Settings** icon in the navigation bar.
{{< nav-icon "settings" >}}
2. Select the **Variables** tab.
-3. Click **+Create Variable**.
+3. Click **{{< icon "plus" >}} Create Variable**.
4. Enter a name for your variable.
5. Select your [variable type](/v2.0/visualize-data/variables/variable-types/).
6. Enter the appropriate variable information.
7. Click **Create**.
## Import a variable
+InfluxDB lets you import variables exported from InfluxDB in JSON format.
-1. Click the **Organizations** icon in the navigation bar.
+1. Click the **Settings** icon in the navigation bar.
- {{< nav-icon "orgs" >}}
+ {{< nav-icon "settings" >}}
-2. Select an organization from the list.
-3. Select the **Variables** tab.
-4. Click the **+Create Variable** dropdown menu and select **Import Variable**.
-3. In the window that appears:
- * Select **Upload File** to drag-and-drop or select a file.
- * Select **Paste JSON** to paste in JSON.
-4. Select an organization from the **Destination Organization** dropdown.
-5. Click **Import JSON as Variable**.
+2. Select the **Variables** tab.
+3. Click the **{{< icon "plus" >}} Create Variable** drop-down menu and select **Import Variable**.
+4. In the window that appears:
+ - Select **Upload File** to drag and drop or select a file.
+ - Select **Paste JSON** to paste in JSON.
+5. Click **Import JSON as Variable**.
diff --git a/content/v2.0/visualize-data/variables/variable-types.md b/content/v2.0/visualize-data/variables/variable-types.md
index cc771cc68..f84ca59cc 100644
--- a/content/v2.0/visualize-data/variables/variable-types.md
+++ b/content/v2.0/visualize-data/variables/variable-types.md
@@ -49,11 +49,11 @@ _For examples of dashboard variable queries, see [Common variable queries](/v2.0
#### Important things to note about variable queries
- The variable will only use values from the `_value` column.
If the data you’re looking for is in a column other than `_value`, use the
- [`rename()`](/v2.0/reference/flux/functions/built-in/transformations/rename/) or
- [`map()`](/v2.0/reference/flux/functions/built-in/transformations/map/) functions
+ [`rename()`](/v2.0/reference/flux/stdlib/built-in/transformations/rename/) or
+ [`map()`](/v2.0/reference/flux/stdlib/built-in/transformations/map/) functions
to change the name of that column to `_value`.
- The variable will only use the first table in the output stream.
- Use the [`group()` function](/v2.0/reference/flux/functions/built-in/transformations/group)
+ Use the [`group()` function](/v2.0/reference/flux/stdlib/built-in/transformations/group)
to group everything into a single table.
- Do not use any [predefined dashboard variables](/v2.0/visualize-data/variables/#predefined-dashboard-variables) in variable queries.
{{% /note %}}
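+
+As a sketch of these requirements, the following hypothetical variable query moves
+a tag column into `_value` and groups everything into a single table (the bucket,
+measurement, and tag names are placeholders):
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -30d)
+  |> filter(fn: (r) => r._measurement == "docker_container_cpu")
+  |> keep(columns: ["container_name"])            // keep only the column of interest
+  |> rename(columns: {container_name: "_value"})  // variable values are read from _value
+  |> group()                                      // merge all tables into one
+  |> distinct()                                   // one row per unique value
+```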
diff --git a/content/v2.0/visualize-data/visualization-types.md b/content/v2.0/visualize-data/visualization-types.md
deleted file mode 100644
index e48da718a..000000000
--- a/content/v2.0/visualize-data/visualization-types.md
+++ /dev/null
@@ -1,210 +0,0 @@
----
-title: Visualization types
-description: >
- InfluxDB dashboards support multiple visualization types including line graphs,
- gauges, tables, and more.
-menu:
- v2_0:
- name: Visualization types
- parent: Visualize data
-weight: 101
----
-The InfluxDB's user interface's (UI) dashboard views support the following visualization types,
-which can be selected in the **Visualization Type** selection view of the
-[Data Explorer](/v2.0/visualize-data/explore-metrics).
-
-Each of the available visualization types and available user controls are described below.
-
-
-* [Graph](#graph)
-* [Graph + Single Stat](#graph-single-stat)
-* [Histogram](#histogram)
-* [Single Stat](#single-stat)
-* [Gauge](#gauge)
-* [Table](#table)
-
-### Graph
-
-There are several types of graphs you can create.
-
-To select this view, select the **Graph** option from the visualization dropdown in the upper right.
-
-#### Graph Controls
-
-To view **Graph** controls, click the settings icon ({{< icon "gear" >}}) next to the visualization dropdown in the upper right.
-
-* **Geometry**: Select from the following options:
- - **Line**: Display a time series in a line graph.
- - **Stacked**: Display multiple time series bars as segments stacked on top of each other.
- - **Step**: Display a time series in a staircase graph.
- - **Bar**: Display the specified time series using a bar chart.
-* **Line Colors**: Select a color scheme to use for your graph.
-* **Title**: y-axis title. Enter title, if using a custom title.
-* **Min**: Minimum y-axis value.
- - **Auto** or **Custom**: Enable or disable auto-setting.
-* **Max**: Maximum y-axis value.
- - **Auto** or **Custom**: Enable or disable auto-setting.
-* **Y-Value's Prefix**: Prefix to be added to y-value.
-* **Y-Value's Suffix**: Suffix to be added to y-value.
-* **Y-Value's Format**: Select between **K/M/B** (Thousand/Million/Billion), **K/M/G** (Kilo/Mega/Giga), or **Raw**.
-* **Scale**: Toggle between **Linear** and **Logarithmic**.
-
-##### Line Graph example
-
-{{< img-hd src="/img/2-0-visualizations-line-graph-example.png" alt="Line Graph example" />}}
-
-##### Stacked Graph example
-
-{{< img-hd src="/img/2-0-visualizations-stacked-graph-example.png" alt="Stacked Graph example" />}}
-
-#### Step-Plot Graph example
-
-{{< img-hd src="/img/2-0-visualizations-step-plot-graph-example.png" alt="Step-Plot Graph example" />}}
-
-##### Bar Graph example
-
-{{< img-hd src="/img/2-0-visualizations-bar-graph-example.png" alt="Bar Graph example" />}}
-
-### Graph + Single Stat
-
-The **Graph + Single Stat** view displays the specified time series in a line graph and overlays the single most recent value as a large numeric value.
-
-To select this view, select the **Graph + Single Stat** option from the visualization dropdown in the upper right.
-
-#### Graph + Single Stat Controls
-
-To view **Graph + Single Stat** controls, click the settings icon ({{< icon "gear" >}}) next to the visualization dropdown in the upper right.
-
-* **Line Colors**: Select the a color scheme to use for your graph.
-
-* **Left Y Axis** section:
- * **Title**: y-axis title. Enter title, if using a custom title.
- * **Min**: Minimum y-axis value.
- - **Auto** or **Custom**: Enable or disable auto-setting.
- * **Max**: Maximum y-axis value.
- - **Auto** or **Custom**: Enable or disable auto-setting.
- * **Y-Value's Prefix**: Prefix to be added to y-value.
- * **Y-Value's Suffix**: Suffix to be added to y-value.
- * **Y-Value's Format**: Select between **K/M/B** (Thousand/Million/Billion), **K/M/G** (Kilo/Mega/Giga), or **Raw**.
- * **Scale**: Toggle between **Linear** and **Logarithmic**.
-
-* **Customize Single-Stat** section:
- * **Prefix**: Prefix to be added to the single stat.
- * **Suffix**: Suffix to be added to the single stat.
- * **Decimal Places**: The number of decimal places to display for the single stat.
- - **Auto** or **Custom**: Enable or disable auto-setting.
-
-* **Colorized Thresholds** section:
- * **Base Color**: Select a base, or background, color from the selection list.
- * **Add a Threshold**: Change the color of the single stat based on the current value.
- * **Value is**: Enter the value at which the single stat should appear in the selected color. Choose a color from the dropdown menu next to the value.
- * **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds. Choose **Background** for the background of the graph to change color based on the configured thresholds.
-
-#### Graph + Single Stat example
-
-{{< img-hd src="/img/2-0-visualizations-line-graph-single-stat-example.png" alt="Line Graph + Single Stat example" />}}
-
-### Histogram
-
-A histogram is a way to view the distribution of data. Unlike column charts, histograms have no time axis. The y-axis is dedicated to count, and the x-axis is divided into bins.
-
-To select this view, select the **Histogram** option from the visualization dropdown in the upper right.
-
-#### Histogram Controls
-
-To view **Histogram** controls, click the settings icon ({{< icon "gear" >}}) next to the visualization dropdown in the upper right.
-
-* **Data** section:
- * **Column**: The column to select data from.
- * **Group By**: The tags to sort by.
-* **Options** section:
- * **Color Scheme**: Select a color scheme to use for your graph.
- * **Positioning**: Select (**Stacked**) to display data stacked on top of each other for each bin, or select
- * **Bins**: Enter a number of bins to divide data into or select Auto to automatically calculate the number of bins.
- * **Auto** or **Custom**: Enable or disable auto-setting.
-
-#### Histogram example
-
-{{< img-hd src="/img/2-0-visualizations-histogram-example.png" alt="Histogram example" />}}
-
-### Single Stat
-
-The **Single Stat** view displays the most recent value of the specified time series as a numerical value.
-
-To select this view, select the **Single Stat** option from the visualization dropdown in the upper right.
-
-#### Single Stat Controls
-
-To view **Single Stat** controls, click the settings icon ({{< icon "gear" >}}) next to the visualization dropdown in the upper right.
-
-* **Customize Single-Stat** section:
- * **Prefix**: Prefix to be added to the single stat.
- * **Suffix**: Suffix to be added to the single stat.
- * **Decimal Places**: The number of decimal places to display for the single stat.
- - **Auto** or **Custom**: Enable or disable auto-setting.
-
-* **Colorized Thresholds** section:
- * **Base Color**: Select a base, or background, color from the selection list.
- * **Add a Threshold**: Change the color of the single stat based on the current value.
- * **Value is**: Enter the value at which the single stat should appear in the selected color. Choose a color from the dropdown menu next to the value.
- * **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds. Choose **Background** for the background of the graph to change color based on the configured thresholds.
-
-### Gauge
-
-The **Gauge** view displays the single value most recent value for a time series in a gauge view.
-
-To select this view, select the **Gauge** option from the visualization dropdown in the upper right.
-
-#### Gauge Controls
-
-To view **Gauge** controls, click the settings icon ({{< icon "gear" >}}) next to the visualization dropdown in the upper right.
-
-* **Customize Gauge** section:
- * **Prefix**: Prefix to be added to the gauge.
- * **Suffix**: Suffix to be added to the gauge.
- * **Decimal Places**: The number of decimal places to display for the the gauge.
- - **Auto** or **Custom**: Enable or disable auto-setting.
-
-* **Colorized Thresholds** section:
- * **Base Color**: Select a base, or background, color from the selection list.
- * **Add a Threshold**: Change the color of the gauge based on the current value.
- * **Value is**: Enter the value at which the gauge should appear in the selected color. Choose a color from the dropdown menu next to the value.
-
-#### Gauge example
-
-{{< img-hd src="/img/2-0-visualizations-gauge-example.png" alt="Gauge example" />}}
-
-### Table
-
-The **Table** option displays the results of queries in a tabular view, which is sometimes easier to analyze than graph views of data.
-
-To select this view, select the **Table** option from the visualization dropdown in the upper right.
-
-#### Table Controls
-
-To view **Table** controls, click the settings icon ({{< icon "gear" >}}) next to the visualization dropdown in the upper right.
-
-* **Customize Table** section:
- * **Default Sort Field**: Select the default sort field. Default is **time**.
- * **Time Format**: Select the time format.
- - Options include: `MM/DD/YYYY HH:mm:ss` (default), `MM/DD/YYYY HH:mm:ss.SSS`, `YYYY-MM-DD HH:mm:ss`, `HH:mm:ss`, `HH:mm:ss.SSS`, `MMMM D, YYYY HH:mm:ss`, `dddd, MMMM D, YYYY HH:mm:ss`, or `Custom`.
- * **Default Sort Field**: Select the default sort field. Default is **time**.
- * **Decimal Places**: Enter the number of decimal places. Default (empty field) is **unlimited**.
- - **Auto** or **Custom**: Enable or disable auto-setting.
-
-* **Column Settings** section:
- * **First Column**: Toggle to **Fixed** to lock the first column so that the listings are always visible. Threshold settings do not apply in the first column when locked.
- * **Table Columns**:
- - Enter a new name to rename any of the columns.
- - Click the eye icon next to a column to hide it.
- - [additional]: Enter name for each additional column.
- - Change the order of the columns by dragging to the desired position.
-
-* **Colorized Thresholds** section:
- * **Base Color**: Select a base, or background, color from the selection list.
- * **Add a Threshold**: Change the color of the table based on the current value.
- * **Value is**: Enter the value at which the table should appear in the selected color. Choose a color from the dropdown menu next to the value.
-
-#### Table view example
-
-{{< img-hd src="/img/2-0-visualizations-table-example.png" alt="Table example" />}}
diff --git a/content/v2.0/visualize-data/visualization-types/_index.md b/content/v2.0/visualize-data/visualization-types/_index.md
new file mode 100644
index 000000000..b6b713070
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/_index.md
@@ -0,0 +1,17 @@
+---
+title: Visualization types
+description: >
+ The InfluxDB UI provides multiple visualization types to visualize your data in
+  a format that makes the most sense for your use case. Use the available customization
+  options to customize each visualization.
+menu:
+ v2_0:
+ parent: Visualize data
+weight: 101
+---
+
+The InfluxDB UI provides multiple visualization types to visualize your data in
+a format that makes the most sense for your use case. Use the available customization
+options to customize each visualization.
+
+{{< children >}}
diff --git a/content/v2.0/visualize-data/visualization-types/gauge.md b/content/v2.0/visualize-data/visualization-types/gauge.md
new file mode 100644
index 000000000..e1c047c4e
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/gauge.md
@@ -0,0 +1,67 @@
+---
+title: Gauge visualization
+list_title: Gauge
+list_image: /img/2-0-visualizations-gauge-example.png
+description: >
+  The Gauge view displays the most recent value for a time series in a gauge.
+weight: 206
+menu:
+ v2_0:
+ name: Gauge
+ parent: Visualization types
+---
+
+The **Gauge** visualization displays the most recent value for a time series in a gauge.
+
+{{< img-hd src="/img/2-0-visualizations-gauge-example.png" alt="Gauge example" />}}
+
+Select the **Gauge** option from the visualization dropdown in the upper right.
+
+## Gauge behavior
+The gauge visualization displays a single numeric data point within a defined spectrum (_default is 0-100_).
+It uses the latest point in the first table (or series) returned by the query.
+
+{{% note %}}
+#### Queries should return one table
+Flux does not guarantee the order in which tables are returned.
+If a query returns multiple tables (or series), the table order can change between query executions
+and result in the Gauge displaying inconsistent data.
+For consistent results, the Gauge query should return a single table.
+{{% /note %}}
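+
+If a query can match more than one series, one way to guarantee a single table is
+to merge the results and keep only the latest point. A sketch, reusing the example
+bucket and measurement from the pressure gauge example below:
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: -1m)
+  |> filter(fn: (r) => r._measurement == "steam-sensors" and r._field == "psi")
+  |> group()                   // merge all returned series into one table
+  |> sort(columns: ["_time"])  // order the merged rows by time
+  |> last()                    // keep only the most recent point
+```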
+
+## Gauge Controls
+To view **Gauge** controls, click the settings icon ({{< icon "gear" >}}) next to
+the visualization dropdown in the upper right.
+
+- **Prefix**: Prefix to add to the gauge.
+- **Suffix**: Suffix to add to the gauge.
+- **Decimal Places**: The number of decimal places to display for the gauge.
+ - **Auto** or **Custom**: Enable or disable auto-setting.
+
+###### Colorized Thresholds
+- **Base Color**: Select a base or background color from the selection list.
+- **Add a Threshold**: Change the color of the gauge based on the current value.
+ - **Value is**: Enter the value at which the gauge should appear in the selected color.
+ Choose a color from the dropdown menu next to the value.
+
+## Gauge examples
+Gauge visualizations are useful for showing the current value of a metric and displaying
+where it falls within a spectrum.
+
+### Steam pressure gauge
+The following example queries sensor data that tracks the pressure of steam pipes
+in a facility and displays it as a gauge.
+
+###### Query pressure data from a specific sensor
+```js
+from(bucket: "example-bucket")
+ |> range(start: -1m)
+ |> filter(fn: (r) =>
+ r._measurement == "steam-sensors" and
+ r._field == "psi"
+ r.sensorID == "a211i"
+ )
+```
+
+###### Visualization options for pressure gauge
+{{< img-hd src="/img/2-0-visualizations-guage-pressure.png" alt="Pressure guage example" />}}
diff --git a/content/v2.0/visualize-data/visualization-types/graph-single-stat.md b/content/v2.0/visualize-data/visualization-types/graph-single-stat.md
new file mode 100644
index 000000000..61ef0820d
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/graph-single-stat.md
@@ -0,0 +1,93 @@
+---
+title: Graph + Single Stat visualization
+list_title: Graph + Single Stat
+list_image: /img/2-0-visualizations-line-graph-single-stat-example.png
+description: >
+ The Graph + Single Stat view displays the specified time series in a line graph
+ and overlays the single most recent value as a large numeric value.
+weight: 202
+menu:
+ v2_0:
+ name: Graph + Single Stat
+ parent: Visualization types
+related:
+ - /v2.0/visualize-data/visualization-types/graph
+ - /v2.0/visualize-data/visualization-types/single-stat
+---
+
+The **Graph + Single Stat** view displays the specified time series in a line graph
+and overlays the single most recent value as a large numeric value.
+
+{{< img-hd src="/img/2-0-visualizations-line-graph-single-stat-example.png" alt="Line Graph + Single Stat example" />}}
+
+Select the **Graph + Single Stat** option from the visualization dropdown in the upper right.
+
+## Graph + Single Stat behavior
+The Graph visualization color codes each table (or series) in the queried data set.
+When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
+
+The Single Stat visualization displays a single numeric data point.
+It uses the latest point in the first table (or series) returned by the query.
+
+{{% note %}}
+#### Queries should return one table
+Flux does not guarantee the order in which tables are returned.
+If a query returns multiple tables (or series), the table order can change between query executions
+and result in the Single Stat visualization displaying inconsistent data.
+For consistent Single Stat results, the query should return a single table.
+{{% /note %}}
+
+## Graph + Single Stat Controls
+To view **Graph + Single Stat** controls, click the settings icon ({{< icon "gear" >}})
+next to the visualization dropdown in the upper right.
+
+###### Data
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+
+###### Options
+- **Line Colors**: Select a color scheme to use for your graph.
+- **Shade Area Below Lines**: Shade in the area below the graph lines.
+
+###### Y Axis
+- **Y Axis Label**: Label for the y-axis.
+- **Y Tick Prefix**: Prefix to be added to y-value.
+- **Y Tick Suffix**: Suffix to be added to y-value.
+- **Y Axis Domain**: The y-axis value range.
+ - **Auto**: Automatically determine the value range based on values in the data set.
+ - **Custom**: Manually specify the value range of the y-axis.
+ - **Min**: Minimum y-axis value.
+ - **Max**: Maximum y-axis value.
+
+###### Customize Single-Stat
+- **Prefix**: Prefix to be added to the single stat.
+- **Suffix**: Suffix to be added to the single stat.
+- **Decimal Places**: The number of decimal places to display for the single stat.
+ - **Auto** or **Custom**: Enable or disable auto-setting.
+
+###### Colorized Thresholds
+- **Base Color**: Select a base or background color from the selection list.
+- **Add a Threshold**: Change the color of the single stat based on the current value.
+ - **Value is**: Enter the value at which the single stat should appear in the selected color.
+ Choose a color from the dropdown menu next to the value.
+- **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds.
+ Choose **Background** for the background of the graph to change color based on the configured thresholds.
+
+## Graph + Single Stat examples
+The primary use case for the Graph + Single Stat visualization is to show the current or latest
+value as well as historical values.
+
+### Show current value and historical values
+The following example shows the current percentage of memory used as well as memory usage over time:
+
+###### Query memory usage percentage
+```js
+from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+```
+###### Memory usage visualization
+{{< img-hd src="/img/2-0-visualizations-graph-single-stat-mem.png" alt="Graph + Single Stat Memory Usage Example" />}}
diff --git a/content/v2.0/visualize-data/visualization-types/graph.md b/content/v2.0/visualize-data/visualization-types/graph.md
new file mode 100644
index 000000000..b32cc360c
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/graph.md
@@ -0,0 +1,70 @@
+---
+title: Graph visualization
+list_title: Graph
+list_image: /img/2-0-visualizations-line-graph-example.png
+description: >
+ The Graph view lets you select from multiple graph types such as line graphs and bar graphs *(Coming)*.
+weight: 201
+menu:
+ v2_0:
+ name: Graph
+ parent: Visualization types
+---
+
+The Graph visualization provides several types of graphs, each configured through
+the [Graph controls](#graph-controls).
+
+{{< img-hd src="/img/2-0-visualizations-line-graph-example.png" alt="Line Graph example" />}}
+
+Select the **Graph** option from the visualization dropdown in the upper right.
+
+## Graph behavior
+The Graph visualization color codes each table (or series) in the queried data set.
+When multiple series are present, it automatically assigns colors based on the selected [Line Colors option](#options).
+
+When using a line graph, all points within a single table are connected.
+
+## Graph controls
+To view **Graph** controls, click the settings icon ({{< icon "gear" >}}) next
+to the visualization dropdown in the upper right.
+
+###### Data
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+
+###### Options
+- **Interpolation**: Select from the following options:
+  - **Line**: Display a time series in a line graph.
+ - **Smooth**: Display a time series in a line graph with smooth point interpolation.
+ - **Step**: Display a time series in a staircase graph.
+
+
+- **Line Colors**: Select a color scheme to use for your graph.
+- **Shade Area Below Lines**: Shade in the area below the graph lines.
+
+###### Y Axis
+- **Y Axis Label**: Label for the y-axis.
+- **Y Tick Prefix**: Prefix to be added to y-value.
+- **Y Tick Suffix**: Suffix to be added to y-value.
+- **Y Axis Domain**: The y-axis value range.
+ - **Auto**: Automatically determine the value range based on values in the data set.
+ - **Custom**: Manually specify the value range of the y-axis.
+ - **Min**: Minimum y-axis value.
+ - **Max**: Maximum y-axis value.
+
+## Graph Examples
+
+##### Graph with linear interpolation
+{{< img-hd src="/img/2-0-visualizations-line-graph-example.png" alt="Line Graph example" />}}
+
+##### Graph with smooth interpolation
+{{< img-hd src="/img/2-0-visualizations-line-graph-smooth-example.png" alt="Step-Plot Graph example" />}}
+
+##### Graph with step interpolation
+{{< img-hd src="/img/2-0-visualizations-line-graph-step-example.png" alt="Step-Plot Graph example" />}}
+
diff --git a/content/v2.0/visualize-data/visualization-types/heatmap.md b/content/v2.0/visualize-data/visualization-types/heatmap.md
new file mode 100644
index 000000000..eac4b2878
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/heatmap.md
@@ -0,0 +1,115 @@
+---
+title: Heatmap visualization
+list_title: Heatmap
+list_image: /img/2-0-visualizations-heatmap-example.png
+description: >
+ A Heatmap displays the distribution of data on an x and y axes where color
+ represents different concentrations of data points.
+weight: 203
+menu:
+ v2_0:
+ name: Heatmap
+ parent: Visualization types
+related:
+ - /v2.0/visualize-data/visualization-types/scatter
+---
+
+A **Heatmap** displays the distribution of data on an x and y axes where color
+represents different concentrations of data points.
+
+{{< img-hd src="/img/2-0-visualizations-heatmap-example.png" alt="Heatmap example" />}}
+
+Select the **Heatmap** option from the visualization dropdown in the upper right.
+
+## Heatmap behavior
+Heatmaps divide data points into "bins" – segments of the visualization with upper
+and lower bounds for both [X and Y axes](#data).
+The [Bin Size option](#options) determines the bounds for each bin.
+The total number of points that fall within a bin determines its value and color.
+Warmer or brighter colors represent higher bin values or density of points within the bin.
+
+## Heatmap Controls
+To view **Heatmap** controls, click the settings icon ({{< icon "gear" >}})
+next to the visualization dropdown in the upper right.
+
+###### Data
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+
+###### Options
+- **Color Scheme**: Select a color scheme to use for your heatmap.
+- **Bin Size**: Specify the size of each bin. Default is 10.
+
+###### X Axis
+- **X Axis Label**: Label for the x-axis.
+- **X Tick Prefix**: Prefix to be added to x-value.
+- **X Tick Suffix**: Suffix to be added to x-value.
+- **X Axis Domain**: The x-axis value range.
+ - **Auto**: Automatically determine the value range based on values in the data set.
+ - **Custom**: Manually specify the value range of the x-axis.
+ - **Min**: Minimum x-axis value.
+ - **Max**: Maximum x-axis value.
+
+###### Y Axis
+- **Y Axis Label**: Label for the y-axis.
+- **Y Tick Prefix**: Prefix to be added to y-value.
+- **Y Tick Suffix**: Suffix to be added to y-value.
+- **Y Axis Domain**: The y-axis value range.
+ - **Auto**: Automatically determine the value range based on values in the data set.
+ - **Custom**: Manually specify the value range of the y-axis.
+ - **Min**: Minimum y-axis value.
+ - **Max**: Maximum y-axis value.
+
+## Heatmap examples
+
+### Cross-measurement correlation
+The following example explores possible correlation between CPU and Memory usage.
+It uses data collected with the Telegraf [Mem](/v2.0/reference/telegraf-plugins/#mem)
+and [CPU](/v2.0/reference/telegraf-plugins/#cpu) input plugins.
+
+###### Join CPU and memory usage
+The following query joins CPU and memory usage on `_time`.
+Each row in the output table contains `_value_cpu` and `_value_mem` columns.
+
+```js
+cpu = from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+
+mem = from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+
+join(tables: {cpu: cpu, mem: mem}, on: ["_time"], method: "inner")
+```
+
+###### Use a heatmap to visualize correlation
+In the Heatmap visualization controls, `_value_cpu` is selected as the [X Column](#data)
+and `_value_mem` is selected as the [Y Column](#data).
+The domain for each axis is also customized to account for the scale difference
+between column values.
+
+{{< img-hd src="/img/2-0-visualizations-heatmap-correlation.png" alt="Heatmap correlation example" />}}
+
+
+## Important notes
+
+### Differences between a heatmap and a scatter plot
+Heatmaps and [Scatter plots](/v2.0/visualize-data/visualization-types/scatter/)
+both visualize the distribution of data points on X and Y axes.
+However, in certain cases, heatmaps provide better visibility into point density.
+
+For example, the dashboard cells below visualize the same query results:
+
+{{< img-hd src="/img/2-0-visualizations-heatmap-vs-scatter.png" alt="Heatmap vs Scatter plot" />}}
+
+The heatmap indicates isolated high point density, which isn't visible in the scatter plot.
+In the scatter plot visualization, points that share the same X and Y coordinates
+appear as a single point.
diff --git a/content/v2.0/visualize-data/visualization-types/histogram.md b/content/v2.0/visualize-data/visualization-types/histogram.md
new file mode 100644
index 000000000..0c186e18c
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/histogram.md
@@ -0,0 +1,80 @@
+---
+title: Histogram visualization
+list_title: Histogram
+list_image: /img/2-0-visualizations-histogram-example.png
+description: >
+ A histogram is a way to view the distribution of data.
+ The y-axis is dedicated to count, and the x-axis is divided into bins.
+weight: 204
+menu:
+ v2_0:
+ name: Histogram
+ parent: Visualization types
+---
+
+A histogram is a way to view the distribution of data.
+The y-axis is dedicated to count, and the X-axis is divided into bins.
+
+{{< img-hd src="/img/2-0-visualizations-histogram-example.png" alt="Histogram example" />}}
+
+Select the **Histogram** option from the visualization dropdown in the upper right.
+
+## Histogram behavior
+The Histogram visualization is a bar graph that displays the number of data points
+that fall within "bins" – segments of the X axis with upper and lower bounds.
+Bin thresholds are determined by dividing the width of the X axis by the number
+of bins set using the [Bins option](#options).
+Data within bins can be further grouped or segmented by selecting columns in the
+[Group By option](#options).
+
+{{% note %}}
+The Histogram visualization automatically bins, segments, and counts data.
+To work properly, query results **should not** be structured as histogram data.
+{{% /note %}}
+
+## Histogram Controls
+To view **Histogram** controls, click the settings icon ({{< icon "gear" >}}) next
+to the visualization dropdown in the upper right.
+
+###### Data
+- **X Column**: The column to select data from.
+- **Group By**: The column to group by.
+
+###### Options
+- **Color Scheme**: Select a color scheme to use for your graph.
+- **Positioning**: Select **Stacked** to stack groups in a bin on top of each other.
+ Select **Overlaid** to overlay groups in each bin.
+- **Bins**: Enter a number of bins to divide data into or select Auto to automatically
+ calculate the number of bins.
+ - **Auto** or **Custom**: Enable or disable auto-setting.
+
+###### X Axis
+- **X Axis Label**: Label for the x-axis.
+- **X Axis Domain**: The x-axis value range.
+ - **Auto**: Automatically determine the value range based on values in the data set.
+ - **Custom**: Manually specify the value range of the x-axis.
+ - **Min**: Minimum x-axis value.
+ - **Max**: Maximum x-axis value.
+
+## Histogram examples
+
+### View error counts by severity over time
+The following example uses the Histogram visualization to show the number of errors
+"binned" by time and segmented by severity.
+_It utilizes data from the [Telegraf Syslog plugin](/v2.0/reference/telegraf-plugins/#syslog)._
+
+##### Query for errors by severity code
+```js
+from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "syslog" and
+ r._field == "severity_code"
+ )
+```
+
+##### Histogram settings
+In the Histogram visualization options, select `_time` as the [X Column](#data)
+and `severity` as the [Group By](#data) option:
+
+{{< img-hd src="/img/2-0-visualizations-histogram-errors.png" alt="Errors histogram" />}}
diff --git a/content/v2.0/visualize-data/visualization-types/scatter.md b/content/v2.0/visualize-data/visualization-types/scatter.md
new file mode 100644
index 000000000..ad54dcb0b
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/scatter.md
@@ -0,0 +1,109 @@
+---
+title: Scatter visualization
+list_title: Scatter
+list_image: /img/2-0-visualizations-scatter-example.png
+description: >
+ The Scatter view uses a scatter plot to display time series data.
+weight: 208
+menu:
+ v2_0:
+ name: Scatter
+ parent: Visualization types
+related:
+ - /v2.0/visualize-data/visualization-types/heatmap
+---
+
+The **Scatter** view uses a scatter plot to display time series data.
+
+{{< img-hd src="/img/2-0-visualizations-scatter-example.png" alt="Scatter plot example" />}}
+
+Select the **Scatter** option from the visualization dropdown in the upper right.
+
+## Scatter behavior
+The scatter visualization maps each data point to X and Y coordinates.
+X and Y axes are specified with the [X Column](#data) and [Y Column](#data) visualization options.
+Each unique series is differentiated using fill colors and symbols.
+Use the [Symbol Column](#data) and [Fill Column](#data) options to select columns
+used to differentiate points in the visualization.
+
+## Scatter controls
+To view **Scatter** controls, click the settings icon ({{< icon "gear" >}}) next
+to the visualization dropdown in the upper right.
+
+###### Data
+- **Symbol Column**: Define a column containing values that should be differentiated with symbols.
+- **Fill Column**: Define a column containing values that should be differentiated with fill color.
+- **X Column**: Select a column to display on the x-axis.
+- **Y Column**: Select a column to display on the y-axis.
+
+###### Options
+- **Color Scheme**: Select a color scheme to use for your scatter plot.
+
+###### X Axis
+- **X Axis Label**: Label for the x-axis.
+
+###### Y Axis
+- **Y Axis Label**: Label for the y-axis.
+- **Y Tick Prefix**: Prefix to be added to y-value.
+- **Y Tick Suffix**: Suffix to be added to y-value.
+- **Y Axis Domain**: The y-axis value range.
+ - **Auto**: Automatically determine the value range based on values in the data set.
+ - **Custom**: Manually specify the value range of the y-axis.
+ - **Min**: Minimum y-axis value.
+ - **Max**: Maximum y-axis value.
+
+## Scatter examples
+
+### Cross-measurement correlation
+The following example explores possible correlation between CPU and Memory usage.
+It uses data collected with the Telegraf [Mem](/v2.0/reference/telegraf-plugins/#mem)
+and [CPU](/v2.0/reference/telegraf-plugins/#cpu) input plugins.
+
+###### Query CPU and memory usage
+The following query creates a union of CPU and memory usage.
+It scales the CPU usage metric to better align with baseline memory usage.
+
+```js
+cpu = from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ // Scale CPU usage
+ |> map(fn: (r) => ({
+ _value: r._value + 60.0,
+ _time: r._time
+ })
+ )
+
+mem = from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+
+union(tables: [cpu, mem])
+```
+
+###### Use a scatter plot to visualize correlation
+In the Scatter visualization controls, points are differentiated based on their group keys.
+
+{{< img-hd src="/img/2-0-visualizations-scatter-correlation.png" alt="Scatter plot correlation example" />}}
+
+## Important notes
+
+### Differences between a scatter plot and a heatmap
+Scatter plots and [Heatmaps](/v2.0/visualize-data/visualization-types/heatmap/)
+both visualize the distribution of data points on X and Y axes.
+However, in certain cases, scatter plots can "hide" points if they share the same X and Y coordinates.
+
+For example, the dashboard cells below visualize the same query results:
+
+{{< img-hd src="/img/2-0-visualizations-heatmap-vs-scatter.png" alt="Heatmap vs Scatter plot" />}}
+
+The heatmap indicates isolated high point density, which isn't visible in the scatter plot.
+In the scatter plot visualization, points that share the same X and Y coordinates
+appear as a single point.
diff --git a/content/v2.0/visualize-data/visualization-types/single-stat.md b/content/v2.0/visualize-data/visualization-types/single-stat.md
new file mode 100644
index 000000000..10c49e303
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/single-stat.md
@@ -0,0 +1,65 @@
+---
+title: Single Stat visualization
+list_title: Single stat
+list_image: /img/2-0-visualizations-single-stat-example.png
+description: >
+ The Single Stat view displays the most recent value of the specified time series as a numerical value.
+weight: 205
+menu:
+ v2_0:
+ name: Single Stat
+ parent: Visualization types
+---
+
+The **Single Stat** view displays the most recent value of the specified time series as a numerical value.
+
+{{< img-hd src="/img/2-0-visualizations-single-stat-example.png" alt="Single stat example" />}}
+
+Select the **Single Stat** option from the visualization dropdown in the upper right.
+
+## Single Stat behavior
+The Single Stat visualization displays a single numeric data point.
+It uses the latest point in the first table (or series) returned by the query.
+
+{{% note %}}
+#### Queries should return one table
+Flux does not guarantee the order in which tables are returned.
+If a query returns multiple tables (or series), the table order can change between query executions
+and result in the Single Stat visualization displaying inconsistent data.
+For consistent results, the Single Stat query should return a single table.
+{{% /note %}}
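+
+For example, a minimal sketch of such a query (assuming memory metrics from the Telegraf Mem input plugin in a hypothetical `example-bucket`) filters to a single `host` value so that only one series, and therefore one table, is returned:
+
+```js
+from(bucket: "example-bucket")
+  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+  |> filter(fn: (r) =>
+      r._measurement == "mem" and
+      r._field == "used_percent" and
+      r.host == "host1"
+    )
+  // Keep only the latest point in the single remaining series
+  |> last()
+```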
+
+## Single Stat Controls
+To view **Single Stat** controls, click the settings icon ({{< icon "gear" >}})
+next to the visualization dropdown in the upper right.
+
+- **Prefix**: Prefix to be added to the single stat.
+- **Suffix**: Suffix to be added to the single stat.
+- **Decimal Places**: The number of decimal places to display for the single stat.
+ - **Auto** or **Custom**: Enable or disable auto-setting.
+
+###### Colorized Thresholds
+- **Base Color**: Select a base or background color from the selection list.
+- **Add a Threshold**: Change the color of the single stat based on the current value.
+ - **Value is**: Enter the value at which the single stat should appear in the selected color.
+ Choose a color from the dropdown menu next to the value.
+- **Colorization**: Choose **Text** for the single stat to change color based on the configured thresholds.
+ Choose **Background** for the background of the graph to change color based on the configured thresholds.
+
+## Single Stat examples
+
+### Show human-readable current value
+The following example shows the current memory usage displayed as a human-readable percentage:
+
+###### Query memory usage percentage
+```js
+from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+```
+
+###### Memory usage as a single stat
+{{< img-hd src="/img/2-0-visualizations-single-stat-memor.png" alt="Graph + Single Stat Memory Usage Example" />}}
diff --git a/content/v2.0/visualize-data/visualization-types/table.md b/content/v2.0/visualize-data/visualization-types/table.md
new file mode 100644
index 000000000..d38254257
--- /dev/null
+++ b/content/v2.0/visualize-data/visualization-types/table.md
@@ -0,0 +1,83 @@
+---
+title: Table visualization
+list_title: Table
+list_image: /img/2-0-visualizations-table-example.png
+description: >
+ The Table option displays the results of queries in a tabular view, which is
+ sometimes easier to analyze than graph views of data.
+weight: 207
+menu:
+ v2_0:
+ name: Table
+ parent: Visualization types
+---
+
+The **Table** option displays the results of queries in a tabular view, which is
+sometimes easier to analyze than graph views of data.
+
+{{< img-hd src="/img/2-0-visualizations-table-example.png" alt="Table example" />}}
+
+Select the **Table** option from the visualization dropdown in the upper right.
+
+## Table behavior
+The table visualization renders queried data in structured, easy-to-read tables.
+Columns and rows match those in the query output.
+If query results contain multiple tables, only one table is shown at a time.
+Select other output tables in the far left column of the table visualization.
+Tables are identified by their [group key](/v2.0/query-data/get-started/#group-keys).
+
+## Table Controls
+To view **Table** controls, click the settings icon ({{< icon "gear" >}}) next to
+the visualization dropdown in the upper right.
+
+- **Default Sort Field**: Select the default sort field. Default is **time**.
+- **Time Format**: Select the time format. Options include:
+ - `MM/DD/YYYY HH:mm:ss` (default)
+ - `MM/DD/YYYY HH:mm:ss.SSS`
+ - `YYYY-MM-DD HH:mm:ss`
+ - `HH:mm:ss`
+ - `HH:mm:ss.SSS`
+ - `MMMM D, YYYY HH:mm:ss`
+ - `dddd, MMMM D, YYYY HH:mm:ss`
+ - `Custom`
+- **Decimal Places**: Enter the number of decimal places. Default (empty field) is **unlimited**.
+ - **Auto** or **Custom**: Enable or disable auto-setting.
+
+###### Column Settings
+- **First Column**: Toggle to **Fixed** to lock the first column so that the listings are always visible.
+ Threshold settings do not apply in the first column when locked.
+- **Table Columns**:
+ - Enter a new name to rename any of the columns.
+ - Click the eye icon next to a column to hide it.
+ - [additional]: Enter name for each additional column.
+ - Change the order of the columns by dragging to the desired position.
+
+###### Colorized Thresholds
+- **Base Color**: Select a base or background color from the selection list.
+- **Add a Threshold**: Change the color of the table based on the current value.
+ - **Value is**: Enter the value at which the table should appear in the selected color.
+ Choose a color from the dropdown menu next to the value.
+
+## Table examples
+Tables are helpful when displaying many human-readable metrics in a dashboard
+such as cluster statistics or log messages.
+
+### Human-readable cluster metrics
+The following example queries the latest reported memory usage from a cluster of servers.
+
+###### Query the latest memory usage from each host
+```js
+from(bucket: "example-bucket")
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> group(columns: ["host"])
+ |> last()
+ |> group()
+ |> keep(columns: ["_value", "host"])
+```
+
+###### Cluster metrics in a table
+{{< img-hd src="/img/2-0-visualizations-table-human-readable.png" alt="Human readable metrics in a table" />}}
diff --git a/content/v2.0/write-data/_index.md b/content/v2.0/write-data/_index.md
index 53e5756bd..e26089492 100644
--- a/content/v2.0/write-data/_index.md
+++ b/content/v2.0/write-data/_index.md
@@ -15,30 +15,48 @@ Collect and write time series data to InfluxDB using [line protocol](/v2.0/refer
Telegraf, data scrapers, the InfluxDB v2 API, `influx` command line interface (CLI),
the InfluxDB user interface (UI), and client libraries.
-- [Requirements to write data](#requirements-to-write-data)
-- [InfluxDB v2 API](#write-data-using-the-influxdb-v2-api)
-- [influx CLI](#write-data-using-the-influx-cli)
-- [InfluxDB UI](#write-data-in-the-influxdb-ui)
-- [Other ways to write data to InfluxDB](#other-ways-to-write-data-to-influxdb)
+- [What you'll need](#what-you-ll-need)
+- [Ways to write data into InfluxDB](#ways-to-write-data-into-influxdb)
+ - [User Interface](#user-interface)
+ - [influx CLI](#influx-cli)
+ - [InfluxDB API](#influxdb-api)
+ - [Others](#others)
-## Requirements to write data
-To write data to InfluxDB, you must have an organization, bucket, authentication token,
-and data formatted in line protocol.
+### What you'll need
-### Organization
-The organization in which to write data.
-Use your organization name or ID.
+To write data into InfluxDB, you need the following:
-### Bucket
-The bucket in which to write data.
-Use the bucket name or ID.
-The bucket must belong to the specified organization.
+- an organization
+{{% note %}}
+See [View organizations](/v2.0/organizations/view-orgs/#view-your-organization-id/) for instructions on viewing your organization ID.
+{{% /note %}}
+- a bucket
+{{% note %}}
+See [View buckets](/v2.0/organizations/view-orgs/#view-your-organization-id/) for instructions on viewing your bucket ID.
+{{% /note %}}
+- an [authentication token](/v2.0/security/tokens/view-tokens/)
-### Precision
-The precision of timestamps provided in the line protocol.
-Default timestamp precision is in nanoseconds.
+The [InfluxDB setup process](/v2.0/get-started/#set-up-influxdb) creates each of these.
-If the precision of the timestamps is anything other than nanoseconds (ns),
+Use _line protocol_ format to write data into InfluxDB.
+Each line represents a data point.
+Each point requires a [*measurement*](/v2.0/reference/line-protocol/#measurement)
+and [*field set*](/v2.0/reference/line-protocol/#field-set) and may also include
+a [*tag set*](/v2.0/reference/line-protocol/#tag-set) and a [*timestamp*](/v2.0/reference/line-protocol/#timestamp).
+
+Line protocol data looks like this:
+
+```sh
+mem,host=host1 used_percent=23.43234543 1556892576842902000
+cpu,host=host1 usage_user=3.8234,usage_system=4.23874 1556892726597397000
+mem,host=host1 used_percent=21.83599203 1556892777007291000
+```
+
+Timestamps are essential in InfluxDB.
+If a data point does not include a timestamp when it is received by the database, InfluxDB uses the current system time (UTC) of its host machine.
+
+The default precision for timestamps is in nanoseconds.
+If the precision of the timestamps is anything other than nanoseconds (`ns`),
you must specify the precision in your write request.
InfluxDB accepts the following precisions:
@@ -47,52 +65,69 @@ InfluxDB accepts the following precisions:
- `ms` - Milliseconds
- `s` - Seconds
-### Authentication token
-All InfluxDB write interactions require an [authentication token](http://localhost:1313/v2.0/security/tokens/).
+_For more details about line protocol, see the [Line protocol reference](/v2.0/reference/line-protocol) and [Best practices for writing data](/v2.0/write-data/best-practices/)._
-### Line protocol
-Use line protocol to write data points to InfluxDB.
-Each line represents a data point.
-Each point requires a [measurement](/v2.0/reference/line-protocol/#measurement)
-and [field set](/v2.0/reference/line-protocol/#field-set) but can also include
-a [tag set](/v2.0/reference/line-protocol/#tag-set) and a [timestamp](/v2.0/reference/line-protocol/#timestamp).
+## Ways to write data into InfluxDB
-{{% note %}}
-_If a data point does not include a timestamp, InfluxDB uses the system time (UTC)
-of its host machine when it receives the data point._
-{{% /note %}}
+To write data into InfluxDB, use one of the following methods:
-##### Example line protocol
-```sh
-mem,host=host1 used_percent=23.43234543 1556892576842902000
-cpu,host=host1 usage_user=3.8234,usage_system=4.23874 1556892726597397000
-mem,host=host1 used_percent=21.83599203 1556892777007291000
-```
+- [User Interface](#user-interface)
+- [influx CLI](#influx-cli)
+- [InfluxDB API](#influxdb-api)
-_For details about line protocol, see the [Line protocol reference](/v2.0/reference/line-protocol) ._
-
+### User Interface
-## Write data using the InfluxDB v2 API
-Use the InfluxDB API `/write` endpoint to write data to InfluxDB.
-Include the following in your request:
+To quickly start writing data, use the provided user interface.
-| Requirement | Include by |
-|:----------- |:---------- |
-| Organization | Use the `org` query parameter in your request URL. |
-| Bucket | Use the `bucket` query parameter in your request URL. |
-| Precision | Use the `precision` query parameter in your request URL. |
-| Authentication token | Use the `Authorization: Token` header. |
-| Line protocol | Pass as plain text in your request body. |
+1. Do one of the following:
+ - _InfluxDB 2.0 OSS users_:
+ In your terminal, run `influxd` and then in your browser, go to the location where you're hosting the UI (by default, localhost:9999).
+ - _InfluxDB 2.0 Cloud users_:
+ In your browser, go to https://cloud2.influxdata.com/.
+2. Click **Load Data** in the navigation menu on the left.
+3. Select **Buckets**.
+4. Under the bucket you want to write data to, click **{{< icon "plus" >}} Add Data**.
+5. Select from the following options:
-###### Example API write request
-```sh
-curl "http://localhost:9999/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=s" \
- --header "Authorization: Token YOURAUTHTOKEN" \
- --data-raw "mem,host=host1 used_percent=23.43234543 1556896326"
-```
+ - [Configure Telegraf Agent](#configure-telegraf-agent)
+ - [Line Protocol](#line-protocol)
+ - [Scrape Metrics](#scrape-metrics)
-## Write data using the influx CLI
-Use the [`influx write` command](/v2.0/reference/cli/influx/write/) to write data to InfluxDB.
+---
+
+#### Configure Telegraf Agent
+
+To configure a Telegraf agent, see [Automatically create a Telegraf configuration](/v2.0/write-data/use-telegraf/auto-config/#create-a-telegraf-configuration).
+
+---
+
+#### Line Protocol
+
+1. Select **Upload File** or **Enter Manually**.
+ - **Upload File:**
+ Select the time precision of your data.
+ Drag and drop the line protocol file into the UI or click to select the
+ file from your file manager.
+ - **Enter Manually:**
+ Select the time precision of your data.
+ Manually enter line protocol.
+2. Click **Continue**.
+ A message indicates whether data is successfully written to InfluxDB.
+3. To add more data or correct line protocol, click **Previous**.
+4. Click **Finish**.
+
+---
+
+#### Scrape Metrics
+
+To scrape metrics, see [Create a scraper](/v2.0/write-data/scrape-data/manage-scrapers/create-a-scraper/#create-a-scraper-in-the-influxdb-ui).
+
+{{% cloud-msg %}}{{< cloud-name >}} does not support scrapers.
+{{% /cloud-msg %}}
+
+### influx CLI
+
+From the command line, use the [`influx write` command](/v2.0/reference/cli/influx/write/) to write data to InfluxDB.
Include the following in your command:
| Requirement | Include by |
@@ -103,45 +138,48 @@ Include the following in your command:
| Authentication token | Set the `INFLUX_TOKEN` environment variable or use the `t`, `--token` flag. |
| Line protocol | Write a single line as a string or pass a file path prefixed with `@`. |
-
##### Example influx write commands
-```sh
-# Write a single data point
-influx write -b bucketName -o orgName -p s 'myMeasurement,host=myHost testField="testData" 1556896326'
-# Write line protocol from a file
+For example, to write a single data point, run:
+
+```sh
+influx write -b bucketName -o orgName -p s 'myMeasurement,host=myHost testField="testData" 1556896326'
+```
+
+To write line protocol from a file, run:
+
+```sh
influx write -b bucketName -o orgName -p s @/path/to/line-protocol.txt
```
-## Write data in the InfluxDB UI
-1. Click **Settings** in the left navigation menu.
+### InfluxDB API
- {{< nav-icon "settings" >}}
+Write data to InfluxDB using an HTTP request to the InfluxDB API `/write` endpoint.
+Include the following in your request:
-2. Select the **Buckets** tab.
-3. Hover over the bucket to write data to and click **{{< icon "plus" >}} Add Data**.
-4. Select **Line Protocol**.
- _You can also [use Telegraf](/v2.0/write-data/use-telegraf/) or
- [scrape data](/v2.0/write-data/scrape-data/)._
-5. Select **Upload File** or **Enter Manually**.
+| Requirement | Include by |
+|:----------- |:---------- |
+| Organization | Use the `org` query parameter in your request URL. |
+| Bucket | Use the `bucket` query parameter in your request URL. |
+| Precision | Use the `precision` query parameter in your request URL. |
+| Authentication token | Use the `Authorization: Token` header. |
+| Line protocol | Pass as plain text in your request body. |
- - **Upload File:**
- Select the time precision of your data.
- Drag and drop the line protocol file into the UI or click to select the
- file from your file manager.
- - **Enter Manually:**
- Select the time precision of your data.
- Manually enter line protocol.
+##### Example API write request
-6. Click **Continue**.
- A message indicates whether data is successfully written to InfluxDB.
-7. To add more data or correct line protocol, click **Previous**.
-8. Click **Finish**.
+Below is an example API write request using `curl`:
-## Other ways to write data to InfluxDB
+```sh
+curl -XPOST "http://localhost:9999/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=s" \
+ --header "Authorization: Token YOURAUTHTOKEN" \
+ --data-raw "mem,host=host1 used_percent=23.43234543 1556896326"
+```
+
+### Others
{{< children >}}
### InfluxDB client libraries
+
Use language-specific client libraries to integrate with the InfluxDB v2 API.
See [Client libraries reference](/v2.0/reference/client-libraries/) for more information.
diff --git a/content/v2.0/write-data/best-practices/_index.md b/content/v2.0/write-data/best-practices/_index.md
new file mode 100644
index 000000000..e5da4c009
--- /dev/null
+++ b/content/v2.0/write-data/best-practices/_index.md
@@ -0,0 +1,17 @@
+---
+title: Best practices for writing data
+seotitle: Best practices for writing data to InfluxDB
+description: >
+ Learn about the recommendations and best practices for writing data to InfluxDB.
+weight: 105
+menu:
+ v2_0:
+ name: Best practices
+ identifier: write-best-practices
+ parent: Write data
+---
+
+The following articles walk through recommendations and best practices for writing
+data to InfluxDB.
+
+{{< children >}}
diff --git a/content/v2.0/write-data/best-practices/duplicate-points.md b/content/v2.0/write-data/best-practices/duplicate-points.md
new file mode 100644
index 000000000..6dcde2152
--- /dev/null
+++ b/content/v2.0/write-data/best-practices/duplicate-points.md
@@ -0,0 +1,131 @@
+---
+title: Handle duplicate data points
+seotitle: Handle duplicate data points when writing to InfluxDB
+description: >
+ InfluxDB identifies unique data points by their measurement, tag set, and timestamp.
+ This article discusses methods for preserving data from two points with a common
+ measurement, tag set, and timestamp but a different field set.
+weight: 202
+menu:
+ v2_0:
+ name: Handle duplicate points
+ parent: write-best-practices
+v2.0/tags: [best practices, write]
+---
+
+InfluxDB identifies unique data points by their measurement, tag set, and timestamp
+(each a part of [Line protocol](/v2.0/reference/line-protocol) used to write data to InfluxDB).
+
+```txt
+web,host=host2,region=us_west firstByte=15.0 1559260800000000000
+--- -------------------------                -------------------
+ |              |                                     |
+Measurement  Tag set                              Timestamp
+```
+
+## Duplicate data points
+For points that have the same measurement name, tag set, and timestamp,
+InfluxDB creates a union of the old and new field sets.
+For any matching field keys, InfluxDB uses the field value of the new point.
+For example:
+
+```sh
+# Existing data point
+web,host=host2,region=us_west firstByte=24.0,dnsLookup=7.0 1559260800000000000
+
+# New data point
+web,host=host2,region=us_west firstByte=15.0 1559260800000000000
+```
+
+After you submit the new data point, InfluxDB overwrites `firstByte` with the new
+field value and leaves the field `dnsLookup` alone:
+
+```sh
+# Resulting data point
+web,host=host2,region=us_west firstByte=15.0,dnsLookup=7.0 1559260800000000000
+```
+
+```sh
+from(bucket: "example-bucket")
+ |> range(start: 2019-05-31T00:00:00Z, stop: 2019-05-31T12:00:00Z)
+ |> filter(fn: (r) => r._measurement == "web")
+
+Table: keys: [_measurement, host, region]
+               _time _measurement  host  region dnsLookup firstByte
+-------------------- ------------ ----- ------- --------- ---------
+2019-05-31T00:00:00Z          web host2 us_west         7        15
+```
+
+## Preserve duplicate points
+To preserve both old and new field values in duplicate points, use one of the following strategies:
+
+- [Add an arbitrary tag](#add-an-arbitrary-tag)
+- [Increment the timestamp](#increment-the-timestamp)
+
+### Add an arbitrary tag
+Add an arbitrary tag with unique values so InfluxDB reads the duplicate points as unique.
+
+For example, add a `uniq` tag to each data point:
+
+```sh
+# Existing point
+web,host=host2,region=us_west,uniq=1 firstByte=24.0,dnsLookup=7.0 1559260800000000000
+
+# New point
+web,host=host2,region=us_west,uniq=2 firstByte=15.0 1559260800000000000
+```
+
+{{% note %}}
+It is not necessary to retroactively add the unique tag to the existing data point.
+Tag sets are evaluated as a whole.
+The arbitrary `uniq` tag on the new point allows InfluxDB to recognize it as a unique point.
+However, this causes the schema of the two points to differ and may lead to challenges when querying the data.
+{{% /note %}}
+
+After writing the new point to InfluxDB:
+
+```sh
+from(bucket: "example-bucket")
+ |> range(start: 2019-05-31T00:00:00Z, stop: 2019-05-31T12:00:00Z)
+ |> filter(fn: (r) => r._measurement == "web")
+
+Table: keys: [_measurement, host, region, uniq]
+               _time _measurement  host  region uniq firstByte dnsLookup
+-------------------- ------------ ----- ------- ---- --------- ---------
+2019-05-31T00:00:00Z          web host2 us_west    1        24         7
+
+Table: keys: [_measurement, host, region, uniq]
+               _time _measurement  host  region uniq firstByte
+-------------------- ------------ ----- ------- ---- ---------
+2019-05-31T00:00:00Z          web host2 us_west    2        15
+```
+
+### Increment the timestamp
+Increment the timestamp by a nanosecond to enforce the uniqueness of each point.
+
+```sh
+# Old data point
+web,host=host2,region=us_west firstByte=24.0,dnsLookup=7.0 1559260800000000000
+
+# New data point
+web,host=host2,region=us_west firstByte=15.0 1559260800000000001
+```
+
+After writing the new point to InfluxDB:
+
+```sh
+from(bucket: "example-bucket")
+ |> range(start: 2019-05-31T00:00:00Z, stop: 2019-05-31T12:00:00Z)
+ |> filter(fn: (r) => r._measurement == "web")
+
+Table: keys: [_measurement, host, region]
+                         _time _measurement  host  region firstByte dnsLookup
+------------------------------ ------------ ----- ------- --------- ---------
+2019-05-31T00:00:00.000000000Z          web host2 us_west        24         7
+2019-05-31T00:00:00.000000001Z          web host2 us_west        15
+```
+
+{{% note %}}
+The output of the example queries in this article has been modified to clearly show
+the different approaches and results for handling duplicate data.
+{{% /note %}}
diff --git a/content/v2.0/write-data/best-practices/optimize-writes.md b/content/v2.0/write-data/best-practices/optimize-writes.md
new file mode 100644
index 000000000..20db613a8
--- /dev/null
+++ b/content/v2.0/write-data/best-practices/optimize-writes.md
@@ -0,0 +1,63 @@
+---
+title: Optimize writes to InfluxDB
+description: >
+ Simple tips to optimize performance and system overhead when writing data to InfluxDB.
+weight: 202
+menu:
+ v2_0:
+ parent: write-best-practices
+v2.0/tags: [best practices, write]
+---
+
+Use these tips to optimize performance and system overhead when writing data to InfluxDB.
+
+{{% note %}}
+The following tools write to InfluxDB and employ write optimizations by default:
+
+- [Telegraf](/v2.0/write-data/use-telegraf/)
+- [InfluxDB scrapers](/v2.0/write-data/scrape-data/)
+- [InfluxDB client libraries](/v2.0/reference/client-libraries/)
+{{% /note %}}
+
+---
+
+## Batch writes
+
+Write data in batches to minimize network overhead when writing data to InfluxDB.
+
+{{% note %}}
+The optimal batch size is 5000 lines of line protocol.
+{{% /note %}}
+
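+As a minimal sketch (reusing the `influx write` flags shown elsewhere in this documentation, with a hypothetical batch file), write a whole batch of newline-delimited line protocol in a single request:
+
+```sh
+# Write a file containing a batch of line protocol (ideally ~5000 lines) in one request.
+# "/path/to/batch.txt" is a hypothetical file path.
+influx write -b bucketName -o orgName -p s @/path/to/batch.txt
+```
+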
+## Sort tags by key
+
+Before writing data points to InfluxDB, sort tags by key in lexicographic order.
+_Verify sort results match results from the [Go `bytes.Compare` function](http://golang.org/pkg/bytes/#Compare)._
+
+```sh
+# Line protocol example with unsorted tags
+measurement,tagC=therefore,tagE=am,tagA=i,tagD=i,tagB=think fieldKey=fieldValue 1562020262
+
+# Optimized line protocol example with tags sorted by key
+measurement,tagA=i,tagB=think,tagC=therefore,tagD=i,tagE=am fieldKey=fieldValue 1562020262
+```
+
+## Use the coarsest time precision possible
+
+InfluxDB lets you write data with nanosecond precision; however, if data isn't
+collected in nanoseconds, there is no need to write at that precision.
+Using the coarsest precision possible for timestamps can result in significant
+compression improvements.
+
+_Specify timestamp precision when [writing to InfluxDB](/v2.0/write-data/#precision)._
+
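+For example, the two lines below (a hypothetical illustration reusing the memory metric shown earlier in this documentation) describe the same point; the second is written with second precision and uses a much shorter timestamp:
+
+```sh
+# Nanosecond precision (ns, the default)
+mem,host=host1 used_percent=23.43234543 1556892576842902000
+
+# Second precision (requires specifying precision=s in the write request)
+mem,host=host1 used_percent=23.43234543 1556892576
+```
+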
+## Synchronize hosts with NTP
+
+Use the Network Time Protocol (NTP) to synchronize time between hosts.
+If a timestamp isn't included in line protocol, InfluxDB uses its host's local
+time (in UTC) to assign timestamps to each point.
+If a host's clock isn't synchronized with NTP, timestamps may be inaccurate.
+
+## Write multiple data points in one request
+
+To write multiple lines in one request, delimit each line of line protocol with a newline character (`\n`).
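+
+For example, the following sketch (reusing the placeholder organization, bucket, and token from the API example in this documentation) writes three points in a single request:
+
+```sh
+curl -XPOST "http://localhost:9999/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=ns" \
+  --header "Authorization: Token YOURAUTHTOKEN" \
+  --data-raw "mem,host=host1 used_percent=23.43234543 1556892576842902000
+cpu,host=host1 usage_user=3.8234,usage_system=4.23874 1556892726597397000
+mem,host=host1 used_percent=21.83599203 1556892777007291000"
+```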
diff --git a/content/v2.0/write-data/scrape-data/manage-scrapers/create-a-scraper.md b/content/v2.0/write-data/scrape-data/manage-scrapers/create-a-scraper.md
index 75f521b0f..5abec8bb1 100644
--- a/content/v2.0/write-data/scrape-data/manage-scrapers/create-a-scraper.md
+++ b/content/v2.0/write-data/scrape-data/manage-scrapers/create-a-scraper.md
@@ -13,17 +13,18 @@ weight: 301
Create a new scraper in the InfluxDB user interface (UI).
## Create a scraper in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Click the **Scrapers** tab.
+2. Click **Scrapers**.
3. Click **{{< icon "plus" >}} Create Scraper**.
4. Enter a **Name** for the scraper.
5. Select a **Bucket** to store the scraped data.
-6. Enter the **Target URL** to scrape. The default URL value is `http://localhost:9999/metrics`,
+6. Enter the **Target URL** to scrape.
+ The default URL value is `http://localhost:9999/metrics`,
which provides InfluxDB-specific metrics in the [Prometheus data format](https://prometheus.io/docs/instrumenting/exposition_formats/).
-7. Click **Finish**.
+7. Click **Create**.
The new scraper will begin scraping data after approximately 10 seconds,
then continue scraping in 10 second intervals.
diff --git a/content/v2.0/write-data/scrape-data/manage-scrapers/delete-a-scraper.md b/content/v2.0/write-data/scrape-data/manage-scrapers/delete-a-scraper.md
index 646658dfc..e65ea251c 100644
--- a/content/v2.0/write-data/scrape-data/manage-scrapers/delete-a-scraper.md
+++ b/content/v2.0/write-data/scrape-data/manage-scrapers/delete-a-scraper.md
@@ -13,11 +13,10 @@ weight: 303
Delete a scraper from the InfluxDB user interface (UI).
## Delete a scraper from the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Click the **Scrapers** tab. A listing of any existing scrapers appears with the
- **Name**, **URL**, and **BUCKET** for each scraper.
-3. Hover over the scraper you want to delete and click **Delete**.
-4. Click **Confirm**.
+2. Click **Scrapers**.
+3. Hover over the scraper you want to delete and click the **{{< icon "delete" >}}** icon.
+4. Click **Delete**.
diff --git a/content/v2.0/write-data/scrape-data/manage-scrapers/update-a-scraper.md b/content/v2.0/write-data/scrape-data/manage-scrapers/update-a-scraper.md
index b6cf1c67c..1edb06f29 100644
--- a/content/v2.0/write-data/scrape-data/manage-scrapers/update-a-scraper.md
+++ b/content/v2.0/write-data/scrape-data/manage-scrapers/update-a-scraper.md
@@ -13,16 +13,16 @@ weight: 302
Update a scraper in the InfluxDB user interface (UI).
{{% note %}}
-Scraper **Target URLs** and **BUCKETS** can not be updated.
+Scraper **Target URLs** and **Buckets** cannot be updated.
To modify either, [create a new scraper](/v2.0/write-data/scrape-data/manage-scrapers/create-a-scraper).
{{% /note %}}
## Update a scraper in the InfluxDB UI
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Click the **Scrapers** tab. A list of existing scrapers appears.
+2. Click **Scrapers**.
3. Hover over the scraper you would like to update and click the **{{< icon "pencil" >}}**
that appears next to the scraper name.
4. Enter a new name for the scraper. Press Return or click out of the name field to save the change.
diff --git a/content/v2.0/write-data/use-telegraf/_index.md b/content/v2.0/write-data/use-telegraf/_index.md
index 87ccf6402..e1f9ff01d 100644
--- a/content/v2.0/write-data/use-telegraf/_index.md
+++ b/content/v2.0/write-data/use-telegraf/_index.md
@@ -13,7 +13,6 @@ menu:
v2_0:
name : Use Telegraf
parent: Write data
-
---
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is InfluxData's
@@ -22,13 +21,15 @@ Its vast library of input plugins and "plug-and-play" architecture lets you quic
and easily collect metrics from many different sources.
This article describes how to use Telegraf to collect and store data in InfluxDB v2.0.
+For a list of available plugins, see [Telegraf plugins](/v2.0/reference/telegraf-plugins/).
+
#### Requirements
- **Telegraf 1.9.2 or greater**.
_For information about installing Telegraf, see the
[Telegraf Installation instructions](https://docs.influxdata.com/telegraf/latest/introduction/installation/)._
## Configure Telegraf
-All Telegraf input and output plugins are enabled and configured in Telegraf's configuration file (`telegraf.conf`).
+Telegraf input and output plugins are enabled and configured in Telegraf's configuration file (`telegraf.conf`).
You have the following options for configuring Telegraf:
{{< children >}}
diff --git a/content/v2.0/write-data/use-telegraf/auto-config/_index.md b/content/v2.0/write-data/use-telegraf/auto-config/_index.md
index aaca5632d..1bd83d4e5 100644
--- a/content/v2.0/write-data/use-telegraf/auto-config/_index.md
+++ b/content/v2.0/write-data/use-telegraf/auto-config/_index.md
@@ -12,11 +12,16 @@ menu:
weight: 201
---
-The InfluxDB user interface (UI) provides a workflow that automatically creates
+The InfluxDB user interface (UI) can automatically create
Telegraf configuration files based on user-selected Telegraf plugins.
This article describes how to create a Telegraf configuration in the InfluxDB UI and
start Telegraf using the generated configuration file.
+{{% note %}}
+Only a subset of plugins are configurable using the InfluxDB UI.
+To use plugins other than those listed, you must [manually configure Telegraf](/v2.0/write-data/use-telegraf/manual-config).
+{{% /note %}}
+
{{% note %}}
_View the [requirements](/v2.0/write-data/use-telegraf#requirements)
for using Telegraf with InfluxDB v2.0._
@@ -25,59 +30,114 @@ for using Telegraf with InfluxDB v2.0._
## Create a Telegraf configuration
1. Open the InfluxDB UI _(default: [localhost:9999](http://localhost:9999))_.
-2. Click **Organizations** in the left navigation menu.
+2. Click **Load Data** in the left navigation menu.
- {{< nav-icon "orgs" >}}
+ {{< nav-icon "load data" >}}
-3. Click on the name of your organization.
-4. Click the **Telegraf** tab.
-5. Click **{{< icon "plus" >}} Create Configuration**.
-6. In the **Bucket** dropdown, select the bucket where Telegraf will store collected data.
-7. Select one or more of the available plugin groups
- (e.g. **System**, **Docker**, **Kubernetes**, **NGINX**, or **Redis**), and click **Continue**.
- {{% note %}}
- All Telegraf plugins are supported, but only a subset are configurable using the InfluxDB UI.
- To use plugins other than those listed, you must [manually configure Telegraf](/v2.0/write-data/use-telegraf/manual-config).
- {{% /note %}}
-8. Review the list of **Plugins to Configure** for configuration requirements.
+3. Select **Telegrafs**.
+4. Click **{{< icon "plus" >}} Create Configuration**.
+5. In the **Bucket** dropdown, select the bucket where Telegraf will store collected data.
+6. Select one or more of the available plugin groups and click **Continue**.
+7. Review the list of **Plugins to Configure** for configuration requirements.
Plugins listed with a {{< icon "check" >}}
require no additional configuration.
To configure a plugin or access plugin documentation, click the plugin name.
-9. Provide a **Telegraf Configuration Name** and an optional **Telegraf Configuration Description**.
-10. Click **Create and Verify**.
-11. The **Test Your Configuration** page provides instructions for how to start
+8. Provide a **Telegraf Configuration Name** and an optional **Telegraf Configuration Description**.
+9. Click **Create and Verify**.
+10. The **Test Your Configuration** page provides instructions for how to start
Telegraf using the generated configuration.
_See [Start Telegraf](#start-telegraf) below for detailed information about what each step does._
-12. Once Telegraf is running, click **Listen for Data** to confirm Telegraf is successfully
+11. Once Telegraf is running, click **Listen for Data** to confirm Telegraf is successfully
sending data to InfluxDB.
Once confirmed, a **Connection Found!** message appears.
-13. Click **Finish**. Your configuration name and the associated bucket name appears
- in the list of Telegraf connections.
+12. Click **Finish**. Your Telegraf configuration name and the associated bucket name appear
+ in the list of Telegraf configurations.
+
+ {{% note %}}
+
+### Windows
+
+If you plan to monitor a Windows host using the System plugin, you must complete the following steps.
+
+1. In the list of Telegraf configurations, double-click your
+ Telegraf configuration, and then click **Download Config**.
+2. Open the downloaded Telegraf configuration file and replace the `[[inputs.processes]]` plugin with one of the following Windows plugins, depending on your Windows configuration:
+
+ - [`[[inputs.win_perf_counters]]`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters)
+ - [`[[inputs.win_services]]`](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_services)
+
+3. Save the file and place it in a directory that **telegraf.exe** can access.
+
+ {{% /note %}}
## Start Telegraf
-### Configure your API token as an environment variable
Requests to the [InfluxDB v2 API](/v2.0/reference/api/) must include an authentication token.
A token identifies specific permissions to the InfluxDB instance.
-Define the `INFLUX_TOKEN` environment variable using your token.
-_For information about viewing tokens, see [View tokens](/v2.0/security/tokens/view-tokens/)._
+### Configure your token as an environment variable
+1. Find your authentication token. _For information about viewing tokens, see [View tokens](/v2.0/security/tokens/view-tokens/)._
+
+2. To configure your token as the `INFLUX_TOKEN` environment variable, run the command appropriate for your operating system and command-line tool:
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[macOS or Linux](#)
+[Windows](#)
+{{% /tabs %}}
+
+{{% tab-content %}}
```sh
export INFLUX_TOKEN=YourAuthenticationToken
```
+{{% /tab-content %}}
+
+{{% tab-content %}}
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[PowerShell](#)
+[CMD](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```sh
+$env:INFLUX_TOKEN = "YourAuthenticationToken"
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```sh
+set INFLUX_TOKEN=YourAuthenticationToken
+REM Make sure to include a space character at the end of this command.
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
### Start the Telegraf service
-Start the Telegraf service using the `-config` flag to specify the URL of your generated configuration file.
+
+Start the Telegraf service using the `-config` flag to specify the location of the generated Telegraf configuration file.
+
+- For Windows, the location is always a local file path.
+- For Linux and macOS, the location can be a local file path or URL.
+
Telegraf starts using the Telegraf configuration pulled from InfluxDB API.
-_See the configuration **Setup Instructions** for the exact command._
+{{% note %}}
+InfluxDB host URLs and ports differ between InfluxDB OSS and InfluxDB Cloud.
+For the exact command, see the Telegraf configuration **Setup Instructions** in the InfluxDB UI.
+{{% /note %}}
```sh
telegraf -config http://localhost:9999/api/v2/telegrafs/0xoX00oOx0xoX00o
```
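+
+If you downloaded the configuration file instead of fetching it over HTTP (for example, on Windows), a sketch of the equivalent command points the `-config` flag at a hypothetical local path:
+
+```sh
+telegraf -config /path/to/telegraf.conf
+```
+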
## Manage Telegraf configurations
+
Create, view, and manage Telegraf configurations in the InfluxDB UI.
{{< children >}}
diff --git a/content/v2.0/write-data/use-telegraf/auto-config/delete-telegraf-config.md b/content/v2.0/write-data/use-telegraf/auto-config/delete-telegraf-config.md
index d68a54f60..a021bb107 100644
--- a/content/v2.0/write-data/use-telegraf/auto-config/delete-telegraf-config.md
+++ b/content/v2.0/write-data/use-telegraf/auto-config/delete-telegraf-config.md
@@ -12,15 +12,13 @@ weight: 303
To delete a Telegraf configuration:
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the left navigation menu.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Click the **Telegraf** tab.
+2. Select **Telegraf**.
3. Hover over the configuration you want to delete, click the **{{< icon "trash" >}}**
- icon, and **Delete**.
-
- {{< img-hd src="/img/2-0-telegraf-config-delete.png" />}}
+ icon, and click **Delete**.
{{% note %}}
Deleting a Telegraf configuration does not affect _**running**_ Telegraf agents.
diff --git a/content/v2.0/write-data/use-telegraf/auto-config/update-telegraf-config.md b/content/v2.0/write-data/use-telegraf/auto-config/update-telegraf-config.md
index 1f001756f..aa26523eb 100644
--- a/content/v2.0/write-data/use-telegraf/auto-config/update-telegraf-config.md
+++ b/content/v2.0/write-data/use-telegraf/auto-config/update-telegraf-config.md
@@ -17,13 +17,11 @@ of a Telegraf configuration created in the UI.
You cannot modify Telegraf settings in existing Telegraf configurations through the UI.
{{% /note %}}
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the left navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Click the **Telegraf** tab.
+2. Select **Telegraf**.
3. Hover over the configuration you want to edit and click **{{< icon "pencil" >}}**
to update the name or description.
4. Press Return or click out of the editable field to save your changes.
-
- {{< img-hd src="/img/2-0-telegraf-config-update.png" />}}
diff --git a/content/v2.0/write-data/use-telegraf/auto-config/view-telegraf-config.md b/content/v2.0/write-data/use-telegraf/auto-config/view-telegraf-config.md
index ffe46cb51..52b87b54f 100644
--- a/content/v2.0/write-data/use-telegraf/auto-config/view-telegraf-config.md
+++ b/content/v2.0/write-data/use-telegraf/auto-config/view-telegraf-config.md
@@ -14,11 +14,11 @@ weight: 301
View Telegraf configuration information in the InfluxDB user interface (UI):
-1. Click the **Settings** tab in the navigation bar.
+1. Click **Load Data** in the navigation bar.
- {{< nav-icon "settings" >}}
+ {{< nav-icon "load data" >}}
-2. Click the **Telegraf** tab.
+2. Click **Telegraf**.
### View and download the telegraf.conf
To view the actual `telegraf.conf` associated with the configuration,
diff --git a/content/v2.0/write-data/use-telegraf/manual-config.md b/content/v2.0/write-data/use-telegraf/manual-config.md
index 1e013af5f..4c4937843 100644
--- a/content/v2.0/write-data/use-telegraf/manual-config.md
+++ b/content/v2.0/write-data/use-telegraf/manual-config.md
@@ -24,10 +24,13 @@ for using Telegraf with InfluxDB v2.0._
## Configure Telegraf input and output plugins
Configure Telegraf input and output plugins in the Telegraf configuration file (typically named `telegraf.conf`).
-[Input plugins](https://docs.influxdata.com/telegraf/v1.9/plugins/inputs/) collect metrics.
-[Output plugins](https://docs.influxdata.com/telegraf/v1.9/plugins/outputs/) define destinations where metrics are sent.
+Input plugins collect metrics.
+Output plugins define destinations where metrics are sent.
+
+_See [Telegraf plugins](/v2.0/reference/telegraf-plugins/) for a complete list of available plugins._
## Enable and configure the InfluxDB v2 output plugin
+
To send data to an InfluxDB v2.0 instance, enable the
[`influxdb_v2` output plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md)
in the `telegraf.conf`.
@@ -39,8 +42,11 @@ An array of URLs for your InfluxDB v2.0 instances.
_By default, InfluxDB runs on port `9999`._
{{% cloud-msg %}}
-To write data to an {{< cloud-name "short" >}} instance, use the URL of your {{< cloud-name "short" >}}
-user interface (UI).
+###### {{< cloud-name "short" >}} URL
+To write data to an {{< cloud-name "short" >}} instance, use the URL of your
+{{< cloud-name "short" >}} user interface (UI).
+{{< cloud-name >}} requires HTTPS.
+
For example: https://us-west-2-1.aws.cloud2.influxdata.com
{{% /cloud-msg %}}
@@ -49,14 +55,45 @@ Your InfluxDB v2.0 authorization token.
For information about viewing tokens, see [View tokens](/v2.0/security/tokens/view-tokens/).
{{% note %}}
-#### Avoid storing tokens in plain text
-InfluxData does not recommend storing authentication tokens in plain text in the `telegraf.conf`.
-A secure alternative is to set the `INFLUX_TOKEN` environment variable and include
-it into your configuration file.
+###### Avoid storing tokens in `telegraf.conf`
+We recommend setting the `INFLUX_TOKEN` environment variable and referencing the environment variable in your configuration file instead of storing the token in plain text.
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[macOS or Linux](#)
+[Windows](#)
+{{% /tabs %}}
+
+{{% tab-content %}}
```sh
export INFLUX_TOKEN=YourAuthenticationToken
```
+{{% /tab-content %}}
+
+{{% tab-content %}}
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[PowerShell](#)
+[CMD](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```sh
+$env:INFLUX_TOKEN = "YourAuthenticationToken"
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```sh
+set INFLUX_TOKEN=YourAuthenticationToken
+REM Make sure to include a space character at the end of this command.
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
_See the [example `telegraf.conf` below](#example-influxdb-v2-configuration)._
{{% /note %}}
@@ -68,6 +105,8 @@ The name of the organization that owns the target bucket.
The name of the bucket to write data to.
#### Example influxdb_v2 configuration
+The example below illustrates `influxdb_v2` configurations that write to InfluxDB OSS or {{< cloud-name >}}.
+
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[InfluxDB OSS](#)
@@ -98,16 +137,20 @@ The name of the bucket to write data to.
# ...
```
+
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
{{% note %}}
+
##### Write to InfluxDB v1.x and v2.0
+
If a Telegraf agent is already writing to an InfluxDB v1.x database,
enabling the InfluxDB v2 output plugin will write data to both v1.x and v2.0 instances.
{{% /note %}}
## Start Telegraf
+
Start the Telegraf service using the `-config` flag to specify the location of your `telegraf.conf`.
```sh
diff --git a/data/products.yml b/data/products.yml
index e2bf5e442..8ae7981b9 100644
--- a/data/products.yml
+++ b/data/products.yml
@@ -4,6 +4,6 @@ enterprise:
link: "#"
cloud:
- name: "InfluxDB Cloud 2.0 Beta"
+ name: "InfluxDB Cloud 2.0"
shortname: "InfluxDB Cloud"
link: "https://cloud2.influxdata.com/beta/signup"
diff --git a/data/telegraf_plugin_filters.yml b/data/telegraf_plugin_filters.yml
new file mode 100644
index 000000000..26aadde69
--- /dev/null
+++ b/data/telegraf_plugin_filters.yml
@@ -0,0 +1,30 @@
+filters:
+ - category: Plugin type
+ values:
+ - Input
+ - Output
+ - Aggregator
+ - Processor
+ - category: Plugin category
+ values:
+ - Applications
+ - Build & Deploy
+ - Cloud
+ - Containers
+ - Data Stores
+ - IoT
+ - Logging
+ - Messaging
+ - Networking
+ - Servers
+ - Systems
+ - Web
+ - category: Operating system
+ values:
+ - Linux
+ - macOS
+ - Windows
+ - category: Status
+ values:
+ - New
+ - Deprecated
diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml
new file mode 100644
index 000000000..43b2c1630
--- /dev/null
+++ b/data/telegraf_plugins.yml
@@ -0,0 +1,2259 @@
+############## %%%%%% %% %% %%%%% %% %% %%%%%% %%%% ##############
+############## %% %%% %% %% %% %% %% %% %% ##############
+############## %% %% %%% %%%%% %% %% %% %%%% ##############
+############## %% %% %% %% %% %% %% %% ##############
+############## %%%%%% %% %% %% %%%% %% %%%% ##############
+
+input:
+ - name: AMQP Consumer
+ id: amqp_consumer
+ description: |
+ The AMQP Consumer input plugin provides a consumer for use with AMQP 0-9-1,
+ a prominent implementation of this protocol
+ being RabbitMQ.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/amqp_consumer/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: ActiveMQ
+ id: activemq
+ description: |
+ The ActiveMQ input plugin gathers queues, topics, and subscriber metrics
+ using the ActiveMQ Console API.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/activemq/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: Aerospike
+ id: aerospike
+ description: |
+ The Aerospike input plugin queries Aerospike servers and gets node statistics
+ and statistics for all configured namespaces.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/aerospike/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Amazon CloudWatch Statistics
+ id: cloudwatch
+ description: |
+ The Amazon CloudWatch Statistics input plugin pulls metric statistics from Amazon CloudWatch.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cloudwatch/README.md
+ introduced: 0.12.1
+ tags: [linux, macos, windows, cloud]
+
+ - name: Amazon Kinesis Consumer
+ id: kinesis_consumer
+ description: |
+ The Amazon Kinesis Consumer input plugin reads from a Kinesis data stream and creates
+ metrics using one of the supported [input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kinesis_consumer/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, cloud, messaging]
+
+ - name: Apache Aurora
+ id: aurora
+ description: |
+ The Aurora input plugin gathers metrics from [Apache Aurora](https://aurora.apache.org/) schedulers.
+ For monitoring recommendations, see [Monitoring your Aurora cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/aurora/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, applications, containers]
+
+ - name: Apache HTTP Server
+ id: apache
+ description: |
+ The Apache HTTP Server input plugin collects server performance information
+ using the `mod_status` module of the Apache HTTP Server.
+
+ Typically, the `mod_status` module is configured to expose a page at the
+ `/server-status?auto` location of the Apache server.
+ The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus)
+ option must be enabled in order to collect all available fields.
+ For information about how to configure your server reference, see the
+ [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/apache/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: Apache Kafka Consumer
+ id: kafka_consumer
+ description: |
+ The Apache Kafka Consumer input plugin polls a specified Kafka topic and adds messages to InfluxDB.
+ Messages are expected in the line protocol format.
+ [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
+ is used to talk to the Kafka cluster so multiple instances of Telegraf can read
+ from the same topic in parallel.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kafka_consumer/README.md
+ introduced: 0.2.3
+ tags: [linux, macos, windows, messaging]
+
+ - name: Apache Mesos
+ id: mesos
+ description: |
+ The Apache Mesos input plugin gathers metrics from Mesos. For more information, please check the
+ [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mesos/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, containers]
+
+
+ - name: Apache Solr
+ id: solr
+ description: |
+ The Apache Solr input plugin collects stats using the MBean Request Handler.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/solr/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Apache Tomcat
+ id: tomcat
+ description: |
+ The Apache Tomcat input plugin collects statistics available from the Apache
+ Tomcat manager status page (`http://<host>/manager/status/all?XML=true`).
+ Using `XML=true` returns XML data.
+ See the [Apache Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html#Server_Status)
+ for details on these statistics.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/tomcat/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: Apache Zipkin
+ id: zipkin
+ description: |
+ The Apache Zipkin input plugin implements the Zipkin HTTP server to gather trace
+ and timing data needed to troubleshoot latency problems in microservice architectures.
+
+ > This plugin is experimental. Its data schema may be subject to change based on
+ > its main usage cases and the evolution of the OpenTracing standard.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/zipkin/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Apache Zookeeper
+ id: zookeeper
+ description: |
+ The Apache Zookeeper input plugin collects variables output from the `mntr`
+ command [Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/zookeeper/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, build-deploy]
+
+ - name: Apcupsd
+ id: apcupsd
+ description: |
+ The Apcupsd input plugin reads data from an apcupsd daemon over its NIS network protocol.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/apcupsd/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Bcache
+ id: bcache
+ description: |
+ The Bcache input plugin gets bcache statistics from the `stats_total` directory and `dirty_data` file.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/bcache/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Beanstalkd
+ id: beanstalkd
+ description: |
+ The Beanstalkd input plugin collects server stats as well as tube stats
+ (reported by `stats` and `stats-tube` commands respectively).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/beanstalkd/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: BIND 9 Nameserver Statistics
+ id: bind
+ description: |
+ The BIND 9 Nameserver Statistics input plugin decodes the JSON or XML statistics provided by BIND 9 nameservers.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/bind/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Bond
+ id: bond
+ description: |
+ The Bond input plugin collects network bond interface status, bond slave
+ interface status, and the failure count of bond slave interfaces.
+ The plugin collects these metrics from `/proc/net/bonding/*` files.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/bond/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Burrow
+ id: burrow
+ description: |
+ The Burrow input plugin collects Apache Kafka topic, consumer, and partition
+ status using the [Burrow](https://github.com/linkedin/Burrow)
+ [HTTP Endpoint](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/burrow/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: Cassandra
+ id: cassandra
+ description: |
+ *Deprecated in Telegraf 1.7.0 in favor of the [jolokia2](#jolokia2_agent) input plugin.
+ See [example Jolokia2/Cassandra configurations](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia2/examples/cassandra.conf).*
+
+ The Cassandra input plugin collects Cassandra 3 / JVM metrics exposed as MBean
+ attributes through the jolokia REST endpoint.
+ All metrics are collected for each server configured.
+ link: https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/cassandra
+ introduced: 0.12.1
+ deprecated: 1.7.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Ceph Storage
+ id: ceph
+ description: |
+ The Ceph Storage input plugin collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ceph/README.md
+ introduced: 0.13.1
+ tags: [linux, macos, windows, data-stores]
+
+ - name: CGroup
+ id: cgroup
+ description: |
+ The CGroup input plugin captures specific statistics per cgroup.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cgroup/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Chrony
+ id: chrony
+ description: |
+ The Chrony input plugin gets standard chrony metrics and requires the chronyc executable.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/chrony/README.md
+ introduced: 0.13.1
+ tags: [linux, macos, windows, networking, systems]
+
+ - name: Cisco GNMI Telemetry
+ id: cisco_telemetry_gnmi
+ description: |
+ Cisco GNMI Telemetry is an input plugin that consumes telemetry data similar to the GNMI specification.
+ This GRPC-based protocol can utilize TLS for authentication and encryption.
+ This plugin has been developed to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1 and later.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cisco_telemetry_gnmi/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Cisco Model-driven Telemetry (MDT)
+ id: cisco_telemetry_mdt
+ description: |
+ Cisco model-driven telemetry (MDT) is an input plugin that consumes telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms.
+ It supports TCP & GRPC dialout transports. GRPC-based transport can utilize TLS for authentication and encryption.
+ Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cisco_telemetry_mdt/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Conntrack
+ id: conntrack
+ description: |
+ The Conntrack input plugin collects stats from Netfilter's conntrack-tools.
+
+ The conntrack-tools provide a mechanism for tracking various aspects of
+ network connections as they are processed by netfilter.
+ At runtime, conntrack exposes many of those connection statistics within `/proc/sys/net`.
+ Depending on your kernel version, these files can be found in either `/proc/sys/net/ipv4/netfilter`
+ or `/proc/sys/net/netfilter` and will be prefixed with either `ip_` or `nf_`.
+ This plugin reads the files specified in its configuration and publishes each one as a field,
+ with the prefix normalized to `ip_`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/conntrack/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Consul
+ id: consul
+ description: |
+      The Consul input plugin collects statistics about all health checks registered in Consul.
+      It uses the Consul API to query the data.
+      It does not report Consul telemetry; if needed, Consul can already report those stats using the StatsD protocol.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/consul/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, build-deploy, containers]
+
+ - name: Couchbase
+ id: couchbase
+ description: |
+ The Couchbase input plugin reads per-node and per-bucket metrics from Couchbase.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/couchbase/README.md
+ introduced: 0.12.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: CouchDB
+ id: couchdb
+ description: |
+      The CouchDB input plugin gathers metrics of CouchDB using the `_stats` endpoint.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/couchdb/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, data-stores]
+
+ - name: CPU
+ id: cpu
+ description: |
+      The CPU input plugin gathers metrics about CPU usage.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cpu/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, systems]
+
+ - name: Disk
+ id: disk
+ description: |
+ The Disk input plugin gathers metrics about disk usage by mount point.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/disk/README.md
+ introduced: 0.1.1
+ tags: [linux, macos, windows, systems]
+
+ - name: DiskIO
+ id: diskio
+ description: |
+ The DiskIO input plugin gathers metrics about disk IO by device.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/diskio/README.md
+ introduced: 0.10.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Disque
+ id: disque
+ description: |
+ The Disque input plugin gathers metrics from one or more [Disque](https://github.com/antirez/disque) servers.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/disque
+ introduced: 0.10.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: DMCache
+ id: dmcache
+ description: |
+ The DMCache input plugin provides a native collection for dmsetup-based statistics for dm-cache.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/dmcache/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, systems]
+
+ - name: DNS Query
+ id: dns_query
+ description: |
+ The DNS Query input plugin gathers DNS query times in milliseconds -
+ like [Dig](https://en.wikipedia.org/wiki/Dig_(command)).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/dns_query/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Docker
+ id: docker
+ description: |
+ The Docker input plugin uses the Docker Engine API to gather metrics on running Docker containers.
+ The Docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
+ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/) library documentation.
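+
+      For example, to collect from the local Docker daemon socket (the path shown is the common default; adjust as needed):
+
+      ```
+      [[inputs.docker]]
+      ## Docker Engine API endpoint
+      endpoint = "unix:///var/run/docker.sock"
+      ```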
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md
+ introduced: 0.1.9
+ tags: [linux, macos, windows, build-deploy, containers]
+
+ - name: Docker Log
+ id: docker_log
+ description: |
+ The Docker Log input plugin uses the Docker Engine API to collect logs from running Docker containers.
+ The plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
+ to gather logs from the [Engine API](https://docs.docker.com/engine/api/v1.24/).
+
+ > This plugin works only for containers with the local or `json-file` or `journald` logging driver.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker_log/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, build-deploy, containers, logging]
+
+ - name: Dovecot
+ id: dovecot
+ description: |
+ The Dovecot input plugin uses the dovecot Stats protocol to gather metrics on configured domains.
+ For more information, see the [Dovecot documentation](http://wiki2.dovecot.org/Statistics).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/dovecot/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, applications, web]
+
+ - name: ECS
+ id: ecs
+ description: |
+      The ECS input plugin (Fargate compatible) uses the ECS v2 metadata and stats API endpoints to gather stats on running containers in a task.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ecs/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, cloud, containers]
+
+ - name: Elasticsearch
+ id: elasticsearch
+ description: |
+ The Elasticsearch input plugin queries endpoints to obtain [node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
+ and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
+ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/elasticsearch/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Exec
+ id: exec
+ description: |
+ The Exec input plugin parses supported [Telegraf input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input/)
+ (line protocol, JSON, Graphite, Value, Nagios, Collectd, and Dropwizard) into metrics.
+ Each Telegraf metric includes the measurement name, tags, fields, and timestamp.
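+
+      A minimal configuration sketch (the script path is a hypothetical example):
+
+      ```
+      [[inputs.exec]]
+      ## Commands to run on each interval
+      commands = ["/usr/local/bin/collect_metrics.sh"]
+      timeout = "5s"
+      ## Data format the command output is parsed with
+      data_format = "influx"
+      ```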
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/exec/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows]
+
+ - name: Fail2ban
+ id: fail2ban
+ description: |
+ The Fail2ban input plugin gathers the count of failed and banned IP addresses
+ using [fail2ban](https://www.fail2ban.org/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/fail2ban/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, networking, security]
+
+ - name: Fibaro
+ id: fibaro
+ description: |
+ The Fibaro input plugin makes HTTP calls to the Fibaro controller API to gather values of hooked devices.
+ Those values could be true (`1`) or false (`0`) for switches, percentage for dimmers, temperature, etc.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/fibaro/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, iot]
+
+ - name: File
+ id: file
+ description: |
+ The File input plugin updates a list of files every interval and parses
+ the contents using the selected input data format.
+
+ Files will always be read in their entirety. If you wish to tail or follow a file,
+ then use the [Tail input plugin](#tail).
+
+ > To parse metrics from multiple files that are formatted in one of the supported
+ > [input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input),
+ > use the [Multifile input plugin](#multifile).
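+
+      A minimal configuration sketch (the file path and data format are examples):
+
+      ```
+      [[inputs.file]]
+      ## Files to read on every interval
+      files = ["/tmp/metrics.out"]
+      ## Data format the file contents are parsed with
+      data_format = "influx"
+      ```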
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/file/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Filecount
+ id: filecount
+ description: |
+ The Filecount input plugin reports the number and total size of files in directories that match certain criteria.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/filecount/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Filestat
+ id: filestat
+ description: |
+ The Filestat input plugin gathers metrics about file existence, size, and other stats.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/filestat/README.md
+ introduced: 0.13.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Fireboard
+ id: fireboard
+ description: |
+ The Fireboard input plugin gathers real time temperature data from Fireboard thermometers.
+ To use this input plugin, sign up to use the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/fireboard/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, cloud, iot]
+
+ - name: Fluentd
+ id: fluentd
+ description: |
+      The Fluentd input plugin gathers Fluentd server metrics from the plugin endpoint provided by the `in_monitor` plugin.
+      This plugin understands data provided by the `/api/plugin.json` resource (`/api/config.json` is not covered).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/fluentd/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, servers]
+
+ - name: GitHub
+ id: github
+ description: |
+      The GitHub input plugin gathers repository information from GitHub-hosted repositories.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/github/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Google Cloud PubSub
+ id: cloud_pubsub
+ description: |
+ The Google Cloud PubSub input plugin ingests metrics from
+ [Google Cloud PubSub](https://cloud.google.com/pubsub) and creates metrics
+ using one of the supported [input data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cloud_pubsub/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, cloud, messaging]
+
+ - name: Google Cloud PubSub Push
+ id: cloud_pubsub_push
+ description: |
+ The Google Cloud PubSub Push (`cloud_pubsub_push`) input plugin listens for
+ messages sent using HTTP POST requests from Google Cloud PubSub.
+ The plugin expects messages in Google's Pub/Sub JSON Format ONLY.
+ The intent of the plugin is to allow Telegraf to serve as an endpoint of the
+ Google Pub/Sub 'Push' service. Google's PubSub service will only send over
+ HTTPS/TLS so this plugin must be behind a valid proxy or must be configured to use TLS.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/cloud_pubsub_push/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, cloud, messaging]
+
+ - name: Graylog
+ id: graylog
+ description: |
+ The Graylog input plugin can collect data from remote Graylog service URLs. This plugin currently supports two
+ types of endpoints:
+
+ - multiple (e.g., `http://[graylog-server-ip]:12900/system/metrics/multiple`)
+ - namespace (e.g., `http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}`)
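+
+      For example, to query the `multiple` endpoint (replace `[graylog-server-ip]` with your server address):
+
+      ```
+      [[inputs.graylog]]
+      ## Graylog metrics endpoints to query
+      servers = ["http://[graylog-server-ip]:12900/system/metrics/multiple"]
+      ```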
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/graylog/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, logging]
+
+ - name: HAproxy
+ id: haproxy
+ description: |
+ The HAproxy input plugin gathers metrics directly from any running HAproxy instance.
+      It can do so using the CSV generated by the HAproxy status page or from admin sockets.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/haproxy/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, networking, web]
+
+ - name: Hddtemp
+ id: hddtemp
+ description: |
+ The Hddtemp input plugin reads data from `hddtemp` daemons.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/hddtemp/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, systems]
+
+ - name: HTTP
+ id: http
+ description: |
+ The HTTP input plugin collects metrics from one or more HTTP (or HTTPS) endpoints.
+ The endpoint should have metrics formatted in one of the [supported input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input/).
+ Each data format has its own unique set of configuration options which can be added to the input configuration.
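+
+      A minimal configuration sketch (the URL and data format are examples):
+
+      ```
+      [[inputs.http]]
+      ## Endpoints to collect metrics from
+      urls = ["http://localhost:8080/metrics"]
+      ## Data format the response is parsed with
+      data_format = "json"
+      ```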
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/http/README.md
+ introduced: 1.6.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: HTTP JSON
+ id: httpjson
+ description: |
+ _Deprecated in Telegraf 1.6.0. Use the [HTTP input plugin](#http)._
+
+ The HTTP JSON input plugin collects data from HTTP URLs which respond with JSON.
+ It flattens the JSON and finds all numeric values, treating them as floats.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/httpjson/README.md
+ introduced: 0.1.6
+ deprecated: 1.6.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: HTTP Listener
+ id: http_listener
+ description: |
+ The `http_listener` input plugin was renamed to [`influxdb_listener`](#influxdb_listener).
+      The new name better describes the intended use of the plugin as an InfluxDB relay.
+      For general purpose transfer of metrics in any format via HTTP, use [`http_listener_v2`](#http_listener_v2) instead.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/influxdb_listener/README.md
+ introduced: 1.1.0
+ deprecated: 1.9.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: HTTP Listener v2
+ id: http_listener_v2
+ description: |
+ The HTTP Listener v2 input plugin listens for messages sent via HTTP POST.
+ Messages are expected in [line protocol format](https://docs.influxdata.com/telegraf/latest/data_formats/input/influx)
+      ONLY (other [Telegraf input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input/) are not supported).
+      This plugin allows Telegraf to serve as a proxy or router for the `/write` endpoint of the InfluxDB HTTP API.
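+
+      For example, to accept line protocol writes on a `/write` path (the address and path shown are examples):
+
+      ```
+      [[inputs.http_listener_v2]]
+      ## Address and port to listen on
+      service_address = ":8080"
+      ## URL path to listen for writes on
+      path = "/write"
+      data_format = "influx"
+      ```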
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/http_listener_v2/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: HTTP Response
+ id: http_response
+ description: |
+ The HTTP Response input plugin gathers metrics for HTTP responses.
+ The measurements and fields include `response_time`, `http_response_code`,
+ and `result_type`. Tags for measurements include `server` and `method`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/http_response/README.md
+ introduced: 0.12.1
+ tags: [linux, macos, windows, servers, web]
+
+ - name: Icinga 2
+ id: icinga2
+ description: |
+      The Icinga 2 input plugin gathers status on running services and hosts using
+ the [Icinga 2 API](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/icinga2
+ introduced: 1.8.0
+ tags: [linux, macos, windows, networking, servers, systems]
+
+ - name: InfluxDB v1.x
+ id: influxdb
+ description: |
+ The InfluxDB v1.x input plugin gathers metrics from the exposed InfluxDB v1.x `/debug/vars` endpoint.
+ Using Telegraf to extract these metrics to create a "monitor of monitors" is a
+ best practice and allows you to reduce the overhead associated with capturing
+ and storing these metrics locally within the `_internal` database for production deployments.
+ [Read more about this approach here](https://www.influxdata.com/blog/influxdb-debugvars-endpoint/).
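+
+      For example, to scrape a local InfluxDB 1.x instance (the URL assumes the default port):
+
+      ```
+      [[inputs.influxdb]]
+      ## /debug/vars endpoints to collect from
+      urls = ["http://localhost:8086/debug/vars"]
+      ```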
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/influxdb/README.md
+ introduced: 0.2.5
+ tags: [linux, macos, windows, data-stores]
+
+ - name: InfluxDB v2
+ id: influxdb
+ description: |
+ InfluxDB 2.x exposes its metrics using the Prometheus Exposition Format — there is no InfluxDB v2 input
+ plugin.
+
+ To collect data on an InfluxDB 2.x instance running on localhost, the configuration for the
+ Prometheus input plugin would be:
+
+ ```
+ [[inputs.prometheus]]
+ ## An array of urls to scrape metrics from.
+ urls = ["http://localhost:9999/metrics"]
+ ```
+ introduced: 1.8.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: InfluxDB Listener
+ id: influxdb_listener
+ description: |
+ The InfluxDB Listener input plugin listens for requests sent
+ according to the [InfluxDB HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/).
+ The intent of the plugin is to allow Telegraf to serve as a proxy, or router,
+ for the HTTP `/write` endpoint of the InfluxDB HTTP API.
+
+ > This plugin was previously known as `http_listener`. If you wish to
+ > send general metrics via HTTP, use the [HTTP Listener v2 input plugin](#http_listener_v2) instead.
+
+ The `/write` endpoint supports the `precision` query parameter and can be set
+ to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and
+ defer to the output plugins configuration.
+
+ When chaining Telegraf instances using this plugin, `CREATE DATABASE` requests
+ receive a `200 OK` response with message body `{"results":[]}` but they are not
+ relayed. The output configuration of the Telegraf instance which ultimately
+ submits data to InfluxDB determines the destination database.
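+
+      A minimal configuration sketch (the listen address is an example):
+
+      ```
+      [[inputs.influxdb_listener]]
+      ## Address and port to listen on
+      service_address = ":8186"
+      ```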
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/influxdb_listener/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Interrupts
+ id: interrupts
+ description: |
+ The Interrupts input plugin gathers metrics about IRQs, including `interrupts`
+ (from `/proc/interrupts`) and `soft_interrupts` (from `/proc/softirqs`).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/interrupts/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, systems]
+
+ - name: IPMI Sensor
+ id: ipmi_sensor
+ description: |
+ The IPMI Sensor input plugin queries the local machine or remote host
+ sensor statistics using the `ipmitool` utility.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ipmi_sensor/README.md
+ introduced: 0.12.0
+ tags: [linux, macos, windows, iot]
+
+ - name: Ipset
+ id: ipset
+ description: |
+ The Ipset input plugin gathers packets and bytes counters from Linux `ipset`.
+ It uses the output of the command `ipset save`. Ipsets created without the `counters` option are ignored.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ipset/README.md
+ introduced: 1.6.0
+ tags: [linux, macos, windows, networking, security, systems]
+
+ - name: IPtables
+ id: iptables
+ description: |
+ The IPtables input plugin gathers packets and bytes counters for rules within
+ a set of table and chain from the Linux iptables firewall.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/iptables/README.md
+ introduced: 1.1.0
+ tags: [linux, macos, windows, systems]
+
+ - name: IPVS
+ id: ipvs
+ description: |
+ The IPVS input plugin uses the Linux kernel netlink socket interface to
+ gather metrics about IPVS virtual and real servers.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ipvs/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Jenkins
+ id: jenkins
+ description: |
+ The Jenkins input plugin gathers information about the nodes and jobs running
+      in a Jenkins instance.
+
+      This plugin does not require any plugin to be installed on Jenkins; it uses the
+      Jenkins API to retrieve all the information needed.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jenkins/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, build-deploy]
+
+ - name: Jolokia
+ id: jolokia
+ description: |
+ _Deprecated in Telegraf 1.5.0. Use the [Jolokia2 input plugin](#jolokia2_agent)._
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md
+ introduced: 0.2.1
+ deprecated: 1.5.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Jolokia2 Agent
+ id: jolokia2_agent
+ description: |
+ The Jolokia2 Agent input plugin reads JMX metrics from one or more
+ [Jolokia](https://jolokia.org/) agent REST endpoints using the
+ [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia2/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Jolokia2 Proxy
+ id: jolokia2_proxy
+ description: |
+ The Jolokia2 Proxy input plugin reads JMX metrics from one or more targets by
+ interacting with a [Jolokia](https://jolokia.org/) proxy REST endpoint using the
+ [Jolokia](https://jolokia.org/) [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia2/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, networking]
+
+ - name: JTI OpenConfig Telemetry
+ id: jti_openconfig_telemetry
+ description: |
+ The JTI OpenConfig Telemetry input plugin reads Juniper Networks implementation
+ of OpenConfig telemetry data from listed sensors using the Junos Telemetry Interface.
+ Refer to [openconfig.net](http://openconfig.net/) for more details about OpenConfig
+ and [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jti_openconfig_telemetry/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, iot]
+
+ - name: Kapacitor
+ id: kapacitor
+ description: |
+ The Kapacitor input plugin will collect metrics from the given Kapacitor instances.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kapacitor/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Kernel
+ id: kernel
+ description: |
+ The Kernel input plugin gathers kernel statistics from `/proc/stat`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kernel/README.md
+ introduced: 0.11.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Kernel VMStat
+ id: kernel_vmstat
+ description: |
+ The Kernel VMStat input plugin gathers kernel statistics from `/proc/vmstat`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kernel_vmstat/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Kibana
+ id: kibana
+ description: |
+ The Kibana input plugin queries the Kibana status API to obtain the health
+ status of Kibana and some useful metrics.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kibana/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Kubernetes
+ id: kubernetes
+ description: |
+ > The Kubernetes input plugin is experimental and may cause high cardinality
+ > issues with moderate to large Kubernetes deployments.
+
+ The Kubernetes input plugin talks to the kubelet API using the `/stats/summary`
+ endpoint to gather metrics about the running pods and containers for a single host.
+ It is assumed that this plugin is running as part of a daemonset within a
+ Kubernetes installation. This means that Telegraf is running on every node within the cluster.
+ Therefore, you should configure this plugin to talk to its locally running kubelet.
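+
+      For example, to talk to the kubelet running on the same node (the URL assumes the kubelet read-only port):
+
+      ```
+      [[inputs.kubernetes]]
+      ## URL of the local kubelet
+      url = "http://127.0.0.1:10255"
+      ```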
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kubernetes/README.md
+ introduced: 1.1.0
+ tags: [linux, macos, windows, build-deploy, containers]
+
+ - name: Kubernetes Inventory
+ id: kube_inventory
+ description: |
+ The Kubernetes Inventory input plugin generates metrics derived from the state
+ of the following Kubernetes resources:
+
+ - daemonsets
+ - deployments
+ - nodes
+ - persistentvolumes
+ - persistentvolumeclaims
+ - pods (containers)
+ - statefulsets
+
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/kube_inventory/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, build-deploy, containers]
+
+ - name: LeoFS
+ id: leofs
+ description: |
+ The LeoFS input plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP.
+ See [System monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/)
+ in the [LeoFS documentation](https://leo-project.net/leofs/docs/) for more information.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/leofs/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, systems, data-stores]
+
+ - name: Linux Sysctl FS
+ id: linux_sysctl_fs
+ description: |
+ The Linux Sysctl FS input plugin provides Linux system level file (`sysctl fs`) metrics.
+ The documentation on these fields can be found [here](https://www.kernel.org/doc/Documentation/sysctl/fs.txt).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/linux_sysctl_fs/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Logparser
+ id: logparser
+ description: |
+ The Logparser input plugin streams and parses the given log files.
+      Currently, it can parse "grok" patterns from log files, which also supports
+      regular expression (regex) patterns.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, logging]
+
+ - name: Logstash
+ id: logstash
+ description: |
+ The Logstash input plugin reads metrics exposed by the [Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html).
+ The plugin supports Logstash 5 and later.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logstash/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, logging]
+
+ - name: Lustre2
+ id: lustre2
+ description: |
+ Lustre Jobstats allows for RPCs to be tagged with a value, such as a job's ID.
+ This allows for per job statistics.
+ The Lustre2 input plugin collects statistics and tags the data with the `jobid`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/lustre2
+ introduced: 0.1.5
+ tags: [linux, macos, windows, systems]
+
+ - name: Mailchimp
+ id: mailchimp
+ description: |
+ The Mailchimp input plugin gathers metrics from the `/3.0/reports` MailChimp API.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mailchimp
+ introduced: 0.2.4
+ tags: [linux, macos, windows, cloud, web]
+
+ - name: MarkLogic
+ id: marklogic
+ description: |
+ The MarkLogic input plugin gathers health status metrics from one or more MarkLogic hosts.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/marklogic/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Mcrouter
+ id: mcrouter
+ description: |
+ The Mcrouter input plugin gathers statistics data from a mcrouter instance.
+ [Mcrouter](https://github.com/facebook/mcrouter) is a memcached protocol router,
+ developed and maintained by Facebook, for scaling memcached (http://memcached.org/) deployments.
+ It's a core component of cache infrastructure at Facebook and Instagram where mcrouter
+ handles almost 5 billion requests per second at peak.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mcrouter/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Mem
+ id: mem
+ description: |
+ The Mem input plugin collects system memory metrics.
+ For a more complete explanation of the difference between used and actual_used RAM,
+ see [Linux ate my ram](https://www.linuxatemyram.com/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mem/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, systems]
+
+ - name: Memcached
+ id: memcached
+ description: |
+ The Memcached input plugin gathers statistics data from a Memcached server.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/memcached/README.md
+ introduced: 0.1.2
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Mesosphere DC/OS
+ id: dcos
+ description: |
+ The Mesosphere DC/OS input plugin gathers metrics from a DC/OS cluster's
+ [metrics component](https://docs.mesosphere.com/1.10/metrics/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/dcos/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, containers]
+
+ - name: Microsoft SQL Server
+ id: sqlserver
+ description: |
+ The Microsoft SQL Server input plugin provides metrics for your Microsoft SQL Server instance.
+ It currently works with SQL Server versions 2008+.
+ Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/sqlserver/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Minecraft
+ id: minecraft
+ description: |
+ The Minecraft input plugin uses the RCON protocol to collect statistics from
+ a scoreboard on a Minecraft server.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/minecraft/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, gaming]
+
+ - name: MongoDB
+ id: mongodb
+ description: |
+      The MongoDB input plugin collects MongoDB stats exposed by `serverStatus` and
+      a few other commands, and creates a single measurement containing the values.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mongodb/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, data-stores]
+
+ - name: MQTT Consumer
+ id: mqtt_consumer
+ description: |
+ The MQTT Consumer input plugin reads from specified MQTT topics and adds messages to InfluxDB.
+ Messages are in the [Telegraf input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input/).
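+
+      A minimal configuration sketch (the broker address and topic are examples):
+
+      ```
+      [[inputs.mqtt_consumer]]
+      ## MQTT brokers to connect to
+      servers = ["tcp://127.0.0.1:1883"]
+      ## Topics to subscribe to
+      topics = ["sensors/#"]
+      data_format = "influx"
+      ```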
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mqtt_consumer/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, messaging]
+
+ - name: Multifile
+ id: multifile
+ description: |
+ The Multifile input plugin allows Telegraf to combine data from multiple files
+ into a single metric, creating one field or tag per file.
+      This is often useful for creating custom metrics from the `/sys` or `/proc` filesystems.
+
+ > To parse metrics from a single file formatted in one of the supported
+ > [input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input),
+ > use the [file input plugin](#file).
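+
+      A configuration sketch, assuming a sysfs sensor directory (the paths, destination names, and conversions are examples):
+
+      ```
+      [[inputs.multifile]]
+      base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+      [[inputs.multifile.file]]
+        file = "in_temp_input"
+        dest = "temperature"
+        conversion = "float(3)"
+      ```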
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/multifile/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows]
+
+ - name: MySQL
+ id: mysql
+ description: |
+ The MySQL input plugin gathers the statistics data from MySQL servers.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mysql/README.md
+ introduced: 0.1.1
+ tags: [linux, macos, windows, data-stores]
+
+ - name: NATS Consumer
+ id: nats_consumer
+ description: |
+ The NATS Consumer input plugin reads from specified NATS subjects and adds messages to InfluxDB.
+ Messages are expected in the [Telegraf input data formats](https://docs.influxdata.com/telegraf/latest/data_formats/input/).
+ A Queue Group is used when subscribing to subjects so multiple instances of Telegraf
+ can read from a NATS cluster in parallel.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nats_consumer/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, messaging]
+
+ - name: NATS Server Monitoring
+ id: nats
+ description: |
+ The NATS Server Monitoring input plugin gathers metrics when using the
+ [NATS Server monitoring server](https://www.nats.io/documentation/server/gnatsd-monitoring/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nats/README.md
+ introduced: 1.6.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: Neptune Apex
+ id: neptune_apex
+ description: |
+ The Neptune Apex input plugin collects real-time data from the Apex `status.xml` page.
+ The Neptune Apex controller family allows an aquarium hobbyist to monitor and
+ control their tanks based on various probes.
+ The data is taken directly from the `/cgi-bin/status.xml` at the interval specified
+ in the `telegraf.conf` configuration file.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/neptune_apex/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, iot]
+
+ - name: Net
+ id: net
+ description: |
+ The Net input plugin gathers metrics about network interface usage (Linux only).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/net/NET_README.md
+ introduced: 0.1.1
+ tags: [linux, macos, networking]
+
+ - name: Netstat
+ id: netstat
+ description: |
+      The Netstat input plugin gathers TCP metrics such as established, time-wait,
+      and socket counts by using `lsof`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/net/NETSTAT_README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, networking, systems]
+
+ - name: Network Response
+ id: net_response
+ description: |
+ The Network Response input plugin tests UDP and TCP connection response time.
+ It can also check response text.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/net_response/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, networking]
+
+ - name: NGINX
+ id: nginx
+ description: |
+ The NGINX input plugin reads NGINX basic status information (`ngx_http_stub_status_module`).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, servers, web]
+
+ - name: NGINX VTS
+ id: nginx_vts
+ description: |
+      The NGINX VTS input plugin gathers NGINX status using the external virtual host
+      traffic status module (https://github.com/vozlt/nginx-module-vts).
+      This NGINX module provides access to virtual host status information,
+      including the current status of servers, upstreams, and caches.
+ This is similar to the live activity monitoring of NGINX Plus.
+ For module configuration details, see the
+ [NGINX VTS module documentation](https://github.com/vozlt/nginx-module-vts#synopsis).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx_vts/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: NGINX Plus
+ id: nginx_plus
+ description: |
+ The NGINX Plus input plugin is for NGINX Plus, the commercial version of the open source web server NGINX.
+ To use this plugin you will need a license.
+ For more information, see [What’s the Difference between Open Source NGINX and NGINX Plus?](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/).
+
+      Structures for NGINX Plus have been built based on the history of the
+ [status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx_plus/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: NGINX Plus API
+ id: nginx_plus_api
+ description: |
+ The NGINX Plus API input plugin gathers advanced status information for NGINX Plus servers.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx_plus_api/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: NGINX Upstream Check
+ id: nginx_upstream_check
+ description: |
+ The NGINX Upstream Check input plugin reads the status output of the
+ [nginx_upstream_check](https://github.com/yaoweibin/nginx_upstream_check_module).
+ This module can periodically check the NGINX upstream servers using the configured
+ request and interval to determine if the server is still available.
+      If a check fails, the server is marked as `down` and will not receive
+      any requests until the check passes, at which point the server is marked as `up` again.
+
+      The status page displays the current status of all upstreams and servers,
+      as well as the number of failed and successful checks. This information can be exported
+ in JSON format and parsed by this input.
+    link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx_upstream_check/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: NSQ
+ id: nsq
+ description: |
+ The NSQ input plugin collects metrics from NSQD API endpoints.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nsq
+ introduced: 1.0.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: NSQ Consumer
+ id: nsq_consumer
+ description: |
+ The NSQ Consumer input plugin polls a specified NSQD topic and adds messages to InfluxDB.
+ This plugin allows a message to be in any of the supported data_format types.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nsq_consumer/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, messaging]
+
+ - name: Nstat
+ id: nstat
+ description: |
+ The Nstat input plugin collects network metrics from `/proc/net/netstat`,
+ `/proc/net/snmp`, and `/proc/net/snmp6` files.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nstat/README.md
+ introduced: 0.13.1
+ tags: [linux, macos, windows, networking, systems]
+
+ - name: NTPq
+ id: ntpq
+ description: |
+      The NTPq input plugin gets standard NTP query metrics and requires the `ntpq` executable.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ntpq/README.md
+ introduced: 0.11.0
+ tags: [linux, macos, windows, networking, systems]
+
+ - name: NVIDIA SMI
+ id: nvidia_smi
+ description: |
+ The NVIDIA SMI input plugin uses a query on the [NVIDIA System Management Interface
+ (`nvidia-smi`)](https://developer.nvidia.com/nvidia-system-management-interface)
+      binary to pull GPU stats, including memory and GPU usage, temperature, and more.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nvidia_smi/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, systems]
+
+ - name: OpenLDAP
+ id: openldap
+ description: |
+ The OpenLDAP input plugin gathers metrics from OpenLDAP's `cn=Monitor` backend.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/openldap/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: OpenNTPD
+ id: openntpd
+ description: |
+ The OpenNTPD input plugin gathers standard Network Time Protocol (NTP) query
+ metrics from OpenNTPD using the `ntpctl` command.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/openntpd/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, networking]
+
+ - name: OpenSMTPD
+ id: opensmtpd
+ description: |
+ The OpenSMTPD input plugin gathers stats from [OpenSMTPD](https://www.opensmtpd.org/),
+ a free implementation of the server-side SMTP protocol.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/opensmtpd/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, applications]
+
+ - name: OpenWeatherMap
+ id: openweathermap
+ description: |
+ Collect current weather and forecast data from OpenWeatherMap.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/openweathermap/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, applications]
+
+ - name: PF
+ id: pf
+ description: |
+ The PF input plugin gathers information from the FreeBSD/OpenBSD pf firewall.
+      Currently it can retrieve information about the state table: the number of current
+ entries in the table, and counters for the number of searches, inserts, and removals
+ to the table. The pf plugin retrieves this information by invoking the `pfstat` command.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/pf/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, networking, security]
+
+ - name: PgBouncer
+ id: pgbouncer
+ description: |
+ The PgBouncer input plugin provides metrics for your PgBouncer load balancer.
+ For information about the metrics, see the [PgBouncer documentation](https://pgbouncer.github.io/usage.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/pgbouncer/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, data-stores]
+
+  - name: Phusion Passenger
+ id: passenger
+ description: |
+      The Phusion Passenger input plugin gets Phusion Passenger statistics using
+      the `passenger-status` command line utility.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/passenger/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, web]
+
+ - name: PHP FPM
+ id: phpfpm
+ description: |
+      The PHP FPM input plugin gets phpfpm statistics using either the HTTP status page or the fpm socket.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/phpfpm/README.md
+ introduced: 0.1.10
+ tags: [linux, macos, windows, servers, web]
+
+ - name: Ping
+ id: ping
+ description: |
+      The Ping input plugin measures the round-trip time of ping commands, response time,
+ and other packet statistics.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/ping/README.md
+ introduced: 0.1.8
+ tags: [linux, macos, windows, networking]
+
+ - name: Postfix
+ id: postfix
+ description: |
+ The Postfix input plugin reports metrics on the postfix queues.
+ For each of the active, hold, incoming, maildrop, and deferred
+ [queues](http://www.postfix.org/QSHAPE_README.html#queues),
+ it will report the queue length (number of items),
+ size (bytes used by items), and age (age of oldest item in seconds).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/postfix/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, services, web]
+
+ - name: PostgreSQL
+ id: postgresql
+ description: |
+ The PostgreSQL input plugin provides metrics for your PostgreSQL database.
+ It currently works with PostgreSQL versions 8.1+.
+ It uses data from the built-in `pg_stat_database` and `pg_stat_bgwriter` views.
+ The metrics recorded depend on your version of PostgreSQL.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/postgresql/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, data-stores]
+
+ - name: PostgreSQL Extensible
+ id: postgresql_extensible
+ description: |
+      The PostgreSQL Extensible input plugin provides metrics for your PostgreSQL database.
+ It has been designed to parse SQL queries in the plugin section of `telegraf.conf` files.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/postgresql_extensible
+ introduced: 0.12.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: PowerDNS
+ id: powerdns
+ description: |
+ The PowerDNS input plugin gathers metrics about PowerDNS using UNIX sockets.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/powerdns/README.md
+ introduced: 0.10.2
+ tags: [linux, macos, windows, networking, web]
+
+ - name: PowerDNS Recursor
+ id: powerdns_recursor
+ description: |
+ The PowerDNS Recursor input plugin gathers metrics about PowerDNS Recursor using UNIX sockets.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/powerdns_recursor/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, networking, web]
+
+ - name: Processes
+ id: processes
+ description: |
+ The Processes input plugin gathers info about the total number of processes
+ and groups them by status (zombie, sleeping, running, etc.). On Linux, this
+ plugin requires access to `procfs` (`/proc`); on other operating systems,
+ it requires access to execute `ps`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/processes/README.md
+ introduced: 0.11.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Procstat
+ id: procstat
+ description: |
+      The Procstat input plugin monitors the system resource usage of individual
+      processes using their `/proc` data.
+
+ Processes can be specified either by `pid` file, by executable name, by command
+ line pattern matching, by username, by systemd unit name, or by cgroup name/path
+      (in this order of priority). This plugin uses `pgrep` when an executable name is
+ provided to obtain the `pid`. The Procstat plugin transmits IO, memory, cpu,
+ file descriptor-related measurements for every process specified. A prefix can
+ be set to isolate individual process specific measurements.
+
+ The Procstat input plugin will tag processes according to how they are specified
+ in the configuration. If a pid file is used, a "pidfile" tag will be generated.
+      On the other hand, if an executable is used, an "exe" tag will be generated.
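+
+      For example, to monitor processes by executable name (the name shown is an example):
+
+      ```
+      [[inputs.procstat]]
+      ## Match processes by executable name (generates an "exe" tag)
+      exe = "nginx"
+      ```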
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/procstat/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, systems]
+
+ - name: Prometheus Format
+ id: prometheus
+ description: |
+      The Prometheus Format input plugin gathers metrics from HTTP
+ servers exposing metrics in Prometheus format.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/prometheus/README.md
+ introduced: 0.2.1
+ tags: [linux, macos, windows, applications]
+
+ - name: Puppet Agent
+ id: puppetagent
+ description: |
+      The Puppet Agent input plugin collects variables output by Puppet Agent runs in the
+      `last_run_summary.yaml` file, usually located in `/var/lib/puppet/state/`. For more information, see
+      [Puppet Monitoring: How to Monitor the Success or Failure of Puppet Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/puppetagent
+ introduced: 0.2.0
+ tags: [linux, macos, windows, build-deploy]
+
+ - name: RabbitMQ
+ id: rabbitmq
+ description: |
+ The RabbitMQ input plugin reads metrics from RabbitMQ servers via the
+ [Management Plugin](https://www.rabbitmq.com/management.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/rabbitmq/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, messaging]
+
+ - name: Raindrops Middleware
+ id: raindrops
+ description: |
+ The Raindrops Middleware input plugin reads from the specified
+ [Raindrops middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html)
+ URI and adds the statistics to InfluxDB.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/raindrops/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, servers, web]
+
+ - name: Redis
+ id: redis
+ description: |
+ The Redis input plugin gathers the results of the INFO Redis command.
+ There are two separate measurements: `redis` and `redis_keyspace`,
+ the latter is used for gathering database-related statistics.
+
+      Additionally, the plugin calculates the hit/miss ratio (`keyspace_hitrate`)
+ and the elapsed time since the last RDB save (`rdb_last_save_time_elapsed`).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/README.md
+ introduced: 0.1.1
+ tags: [linux, macos, windows, data-stores]
+
+ - name: RethinkDB
+ id: rethinkdb
+ description: |
+      The RethinkDB input plugin works with RethinkDB 2.3.5+ databases that require
+      username and password authorization and Handshake protocol v1.0.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/rethinkdb
+ introduced: 0.1.3
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Riak
+ id: riak
+ description: |
+ The Riak input plugin gathers metrics from one or more Riak instances.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/riak/README.md
+ introduced: 0.10.4
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Salesforce
+ id: salesforce
+ description: |
+ The Salesforce input plugin gathers metrics about the limits in your Salesforce
+ organization and the remaining usage.
+ It fetches its data from the limits endpoint of the Salesforce REST API.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/salesforce/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows, applications, cloud]
+
+ - name: Sensors
+ id: sensors
+ description: |
+      The Sensors input plugin collects sensor metrics with the `sensors`
+      executable from the `lm-sensors` package.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/sensors/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, iot]
+
+ - name: SMART
+ id: smart
+ description: |
+ The SMART input plugin gets metrics using the command line utility `smartctl`
+ for SMART (Self-Monitoring, Analysis and Reporting Technology) storage devices.
+ SMART is a monitoring system included in computer hard disk drives (HDDs)
+ and solid-state drives (SSDs), which include most modern ATA/SATA, SCSI/SAS and NVMe disks.
+ The plugin detects and reports on various indicators of drive reliability,
+ with the intent of enabling the anticipation of hardware failures.
+ See [smartmontools](https://www.smartmontools.org/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/smart/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, systems]
+
+ - name: SNMP
+ id: snmp
+ description: |
+ The SNMP input plugin gathers metrics from SNMP agents.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/snmp/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, networking]
+
+ - name: SNMP Legacy
+ id: snmp_legacy
+ description: |
+      _Deprecated in Telegraf 1.0.0. Use the [SNMP input plugin](#snmp)._
+
+      The SNMP Legacy input plugin gathers metrics from SNMP agents.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/snmp_legacy/README.md
+ introduced: 0.10.1
+ deprecated: 1.0.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Socket Listener
+ id: socket_listener
+ description: |
+ The Socket Listener input plugin listens for messages from streaming (TCP, UNIX)
+ or datagram (UDP, unixgram) protocols. Messages are expected in the
+ [Telegraf Input Data Formats](https://docs.influxdata.com/telegraf/latest/data_formats/input/).
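+
+      A minimal configuration sketch (the address and data format are examples):
+
+      ```
+      [[inputs.socket_listener]]
+      ## Protocol, address, and port to listen on
+      service_address = "tcp://:8094"
+      data_format = "influx"
+      ```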
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/socket_listener/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Stackdriver
+ id: stackdriver
+ description: |
+ The Stackdriver input plugin gathers metrics from the
+ [Stackdriver Monitoring API](https://cloud.google.com/monitoring/api/v3/).
+
+ > This plugin accesses APIs that are [chargeable](https://cloud.google.com/stackdriver/pricing#monitoring-costs).
+ > You may incur costs.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/stackdriver/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, cloud]
+
+ - name: StatsD
+ id: statsd
+ description: |
+ The StatsD input plugin is a special type of plugin which runs a backgrounded
+ `statsd` listener service while Telegraf is running.
+ StatsD messages are formatted as described in the original
+ [etsy statsd](https://github.com/etsy/statsd/blob/master/docs/metric_types.md) implementation.
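+
+      A minimal configuration sketch (the protocol and address shown are common values, not necessarily your defaults):
+
+      ```
+      [[inputs.statsd]]
+      ## Protocol to listen on
+      protocol = "udp"
+      ## Address and port for the statsd listener
+      service_address = ":8125"
+      ```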
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/statsd/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Swap
+ id: swap
+ description: |
+ Supports: Linux only.
+
+ The Swap input plugin gathers metrics about swap memory usage.
+      For more information about Linux swap spaces, see
+      [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space).
+
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/swap/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, systems]
+
+ - name: Syslog
+ id: syslog
+ description: |
+ The Syslog input plugin listens for syslog messages transmitted over
+ [UDP](https://tools.ietf.org/html/rfc5426) or [TCP](https://tools.ietf.org/html/rfc5425).
+ Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424).
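+
+      For example, to listen for syslog messages over TCP (the address and port are examples):
+
+      ```
+      [[inputs.syslog]]
+      ## Protocol, address, and port to listen on
+      server = "tcp://:6514"
+      ```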
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/syslog/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, logging, systems]
+
+ - name: Sysstat
+ id: sysstat
+ description: |
+ The Sysstat input plugin collects [sysstat](https://github.com/sysstat/sysstat)
+ system metrics with the sysstat collector utility `sadc` and parses the created
+ binary data file with the `sadf` utility.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/sysstat
+ introduced: 0.12.1
+ tags: [linux, macos, windows, systems]
+
+ - name: System
+ id: system
+ description: |
+ The System input plugin gathers general stats on system load, uptime, and
+ number of users logged in. It is basically equivalent to the UNIX `uptime` command.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/system/README.md
+ introduced: 0.1.6
+ tags: [linux, macos, windows, systems]
+
+ - name: Tail
+ id: tail
+ description: |
+ The Tail input plugin "tails" a log file and parses each log message.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/tail/README.md
+ introduced: 1.1.2
+ tags: [linux, macos, windows, logging]
+
+ - name: TCP Listener
+ id: tcp_listener
+ description: |
+ _Deprecated in Telegraf 1.3.0. Use the [Socket Listener input plugin](#socket_listener)._
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/tcp_listener/README.md
+ introduced: 0.11.0
+ deprecated: 1.3.0
+ tags: [linux, macos, windows, networking, web]
+
+ - name: Teamspeak 3
+ id: teamspeak
+ description: |
+ The Teamspeak 3 input plugin uses the Teamspeak 3 ServerQuery interface of
+ the Teamspeak server to collect statistics of one or more virtual servers.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/teamspeak/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, applications, gaming]
+
+ - name: Telegraf v1.x
+ id: internal
+ description: |
+ The Telegraf v1.x input plugin collects metrics about the Telegraf v1.x agent itself.
+ Note that some metrics are aggregates across all instances of one type of plugin.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/internal/README.md
+ introduced: 1.2.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Temp
+ id: temp
+ description: |
+ The Temp input plugin collects temperature data from sensors.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/temp/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, iot]
+
+ - name: Tengine Web Server
+ id: tengine
+ description: |
+ The Tengine Web Server input plugin gathers status metrics from the
+ [Tengine Web Server](http://tengine.taobao.org/) using the
+ [Reqstat module](http://tengine.taobao.org/document/http_reqstat.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/tengine/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: Trig
+ id: trig
+ description: |
+ The Trig input plugin inserts sine and cosine waves for demonstration purposes.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/trig
+ introduced: 0.3.0
+ tags: [linux, macos, windows]
+
+ - name: Twemproxy
+ id: twemproxy
+ description: |
+      The Twemproxy input plugin gathers data from Twemproxy instances, including
+      Twemproxy server statistics, pool data, and backend server (Redis/Memcached)
+      statistics.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/twemproxy
+ introduced: 0.3.0
+ tags: [linux, macos, windows, servers, web]
+
+ - name: UDP Listener
+ id: udp_listener
+ description: |
+      _Deprecated in Telegraf 1.3.0. Use the [Socket Listener input plugin](#socket_listener)._
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/udp_listener/README.md
+ introduced: 0.11.0
+ deprecated: 1.3.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Unbound
+ id: unbound
+ description: |
+ The Unbound input plugin gathers statistics from [Unbound](https://www.unbound.net/),
+ a validating, recursive, and caching DNS resolver.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/unbound/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, networking]
+
+ - name: uWSGI
+ id: uwsgi
+ description: |
+ The uWSGI input plugin gathers metrics about uWSGI using the [uWSGI Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/uwsgi/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, cloud]
+
+ - name: Varnish
+ id: varnish
+ description: |
+ The Varnish input plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/varnish/README.md
+ introduced: 0.13.1
+ tags: [linux, macos, windows, networking]
+
+ - name: VMware vSphere
+ id: vsphere
+ description: |
+ The VMware vSphere input plugin uses the vSphere API to gather metrics from
+ multiple vCenter servers (clusters, hosts, VMs, and data stores).
+ For more information on the available performance metrics, see
+ [Common vSphere Performance Metrics](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/vsphere/METRICS.md).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/vsphere/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, containers]
+
+ - name: Webhooks
+ id: webhooks
+ description: |
+ The Webhooks input plugin starts an HTTPS server and registers multiple webhook listeners.
+
+ #### Available webhooks
+ - [Filestack](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/filestack/README.md)
+ - [GitHub](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/github/README.md)
+ - [Mandrill](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/mandrill/README.md)
+ - [Papertrail](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/papertrail/README.md)
+ - [Particle.io](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/particle/README.md)
+ - [Rollbar](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/rollbar)
+
+ #### Add new webhooks
+ If you need a webhook that is not supported, consider
+ [adding a new webhook](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks#adding-new-webhooks-plugin).
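+
+      The following is a minimal configuration sketch; the listener address and
+      path are placeholders and may differ for your environment:
+
+      ```toml
+      [[inputs.webhooks]]
+        ## Address and port the webhook listener binds to
+        service_address = ":1619"
+
+        ## Register the GitHub webhook on /github
+        [inputs.webhooks.github]
+          path = "/github"
+      ```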
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, applications, web]
+
+ - name: Windows Performance Counters
+ id: win_perf_counters
+ description: |
+ The Windows Performance Counters input plugin reads Performance Counters on the
+      Windows operating system. **Windows only**.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/README.md
+ introduced: 0.10.2
+ tags: [windows, systems]
+
+ - name: Windows Services
+ id: win_services
+ description: |
+ The Windows Services input plugin reports Windows services info. **Windows only**.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_services/README.md
+ introduced: 1.4.0
+ tags: [windows, servers, systems]
+
+ - name: Wireless
+ id: wireless
+ description: |
+ The Wireless input plugin gathers metrics about wireless link quality by
+ reading the `/proc/net/wireless` file. **This plugin currently supports Linux only**.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/wireless/README.md
+ introduced: 1.9.0
+ tags: [linux, networking]
+
+ - name: X.509 Certificate
+ id: x509_cert
+ description: |
+      The X.509 Certificate input plugin provides information about X.509 certificates
+      accessible via local files or network connections.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/x509_cert/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, networking]
+
+ - name: ZFS
+ id: zfs
+ description: |
+      The ZFS input plugin provides metrics from your ZFS filesystems.
+      It supports ZFS on Linux and FreeBSD and gets ZFS statistics from
+      `/proc/spl/kstat/zfs` on Linux and from `sysctl` and `zpool` on FreeBSD.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/zfs/README.md
+ introduced: 0.2.1
+ tags: [linux, macos, windows, systems]
+
+
+########## %%%% %% %% %%%%%% %%%%% %% %% %%%%%% %%%% ##########
+########## %% %% %% %% %% %% %% %% %% %% %% ##########
+########## %% %% %% %% %% %%%%% %% %% %% %%%% ##########
+########## %% %% %% %% %% %% %% %% %% %% ##########
+########## %%%% %%%% %% %% %%%% %% %%%% ##########
+
+output:
+ - name: Amazon CloudWatch
+ id: cloudwatch
+ description: |
+      The Amazon CloudWatch output plugin sends metrics to Amazon CloudWatch.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/cloudwatch/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, cloud]
+
+ - name: Amazon Kinesis
+ id: kinesis
+ description: |
+      The Amazon Kinesis output plugin is an experimental plugin that is still
+      in the early stages of development. It batches all points into
+      one `PUT` request to Kinesis, considerably reducing the number of API requests.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/kinesis/README.md
+ introduced: 0.2.5
+ tags: [linux, macos, windows, cloud, messaging]
+
+ - name: Amon
+ id: amon
+ description: |
+      The Amon output plugin writes metrics to an [Amon server](https://github.com/amonapp/amon)
+      and requires an `apikey` and an `amoninstance` URL.
+      For details on the Amon Agent, see [Monitoring Agent](https://docs.amon.cx/agent/).
+
+      If a point value cannot be converted to a float64, the metric is skipped.
+
+      Metrics are grouped by converting any `_` characters to `.` in the point name.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/amon/README.md
+ introduced: 0.2.1
+ tags: [linux, macos, windows, databases]
+
+ - name: AMQP
+ id: amqp
+ description: |
+      The AMQP output plugin writes to an AMQP 0-9-1 exchange; a prominent implementation
+      of the Advanced Message Queuing Protocol (AMQP) is [RabbitMQ](https://www.rabbitmq.com/).
+
+      Metrics are written to a topic exchange using a tag, defined in the configuration
+      file as `RoutingTag`, as the routing key.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/amqp/README.md
+ introduced: 0.1.9
+ tags: [linux, macos, windows, messaging]
+
+ - name: Apache Kafka
+ id: kafka
+ description: |
+      The Apache Kafka output plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html)
+      acting as a Kafka producer.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/kafka/README.md
+ introduced: 0.1.7
+ tags: [linux, macos, windows, messaging]
+
+ - name: CrateDB
+ id: cratedb
+ description: |
+ The CrateDB output plugin writes to [CrateDB](https://crate.io/), a real-time SQL database for
+ machine data and IoT, using its [PostgreSQL protocol](https://crate.io/docs/crate/reference/protocols/postgres.html).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/cratedb/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Datadog
+ id: datadog
+ description: |
+ The Datadog output plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics)
+      and requires an `apikey`, which can be obtained [here](https://app.datadoghq.com/account/settings#api)
+      for your account.
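+
+      A minimal configuration sketch (the key shown is a placeholder):
+
+      ```toml
+      [[outputs.datadog]]
+        ## Datadog API key for your account
+        apikey = "my-secret-key"
+      ```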
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/datadog/README.md
+ introduced: 0.1.6
+ tags: [linux, macos, windows, applications, cloud]
+
+ - name: Discard
+ id: discard
+ description: |
+ The Discard output plugin simply drops all metrics that are sent to it.
+ It is only meant to be used for testing purposes.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/discard/README.md
+ introduced: 1.2.0
+ tags: [linux, macos, windows]
+
+ - name: Elasticsearch
+ id: elasticsearch
+ description: |
+ The Elasticsearch output plugin writes to Elasticsearch via HTTP using
+      [Elastic](http://olivere.github.io/elastic/). Currently, it supports only the
+      Elasticsearch 5.x series.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/elasticsearch/README.md
+ introduced: 0.1.5
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Exec
+ id: exec
+ description: |
+ The Exec output plugin sends Telegraf metrics to an external application over `stdin`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/exec/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows, systems]
+
+ - name: File
+ id: file
+ description: |
+ The File output plugin writes Telegraf metrics to files.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/file/README.md
+ introduced: 0.10.3
+ tags: [linux, macos, windows, systems]
+
+ - name: Google Cloud PubSub
+ id: cloud_pubsub
+ description: |
+ The Google PubSub output plugin publishes metrics to a [Google Cloud PubSub](https://cloud.google.com/pubsub)
+ topic as one of the supported [output data formats](https://docs.influxdata.com/telegraf/latest/data_formats/output).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/cloud_pubsub/README.md
+ introduced: 1.10.0
+ tags: [linux, macos, windows, messaging, cloud]
+
+ - name: Graphite
+ id: graphite
+ description: |
+ The Graphite output plugin writes to [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via raw TCP.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/graphite/README.md
+ introduced: 0.10.1
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Graylog
+ id: graylog
+ description: |
+ The Graylog output plugin writes to a Graylog instance using the `gelf` format.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/graylog/README.md
+ introduced: 1.0.0
+ tags: [linux, macos, windows, logging]
+
+ - name: HTTP
+ id: http
+ description: |
+      The HTTP output plugin sends metrics in an HTTP message encoded using one of the output data formats.
+ For `data_formats` that support batching, metrics are sent in batch format.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/http/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Health
+ id: health
+ description: |
+      The Health output plugin provides an HTTP health check resource that can be configured
+      to return a failure status code based on the value of a metric.
+      When the plugin is healthy, it returns a 200 response; when unhealthy, it returns a 503 response.
+      The default state is healthy; one or more checks must fail for the resource to enter the failed state.
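+
+      A configuration sketch, assuming a single check on a hypothetical
+      `buffer_size` field (option names may vary between Telegraf versions):
+
+      ```toml
+      [[outputs.health]]
+        ## Address the health check resource listens on
+        service_address = "http://:8080"
+
+        ## Report unhealthy unless buffer_size stays below 5000
+        [[outputs.health.compares]]
+          field = "buffer_size"
+          lt = 5000.0
+      ```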
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/health/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, applications]
+
+ - name: InfluxDB v1.x
+ id: influxdb
+ description: |
+ The InfluxDB v1.x output plugin writes to InfluxDB using HTTP or UDP.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb/README.md
+ introduced: 0.1.1
+ tags: [linux, macos, windows, data-stores]
+
+ - name: InfluxDB v2
+ id: influxdb_v2
+ description: |
+ The InfluxDB v2 output plugin writes metrics to [InfluxDB 2.0](https://github.com/influxdata/influxdb).
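+
+      A minimal configuration sketch (the URL, token, organization, and bucket are placeholders):
+
+      ```toml
+      [[outputs.influxdb_v2]]
+        urls = ["http://localhost:9999"]
+        token = "$INFLUX_TOKEN"
+        organization = "example-org"
+        bucket = "example-bucket"
+      ```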
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Instrumental
+ id: instrumental
+ description: |
+ The Instrumental output plugin writes to the [Instrumental Collector API](https://instrumentalapp.com/docs/tcp-collector)
+ and requires a Project-specific API token.
+
+ Instrumental accepts stats in a format very close to Graphite, with the only
+ difference being that the type of stat (gauge, increment) is the first token,
+ separated from the metric itself by whitespace. The increment type is only used
+ if the metric comes in as a counter through `[[inputs.statsd]]`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/instrumental/README.md
+ introduced: 0.13.1
+ tags: [linux, macos, windows, applications]
+
+ - name: Librato
+ id: librato
+ description: |
+ The Librato output plugin writes to the [Librato Metrics API](http://dev.librato.com/v1/metrics#metrics)
+ and requires an `api_user` and `api_token` which can be obtained
+ [here](https://metrics.librato.com/account/api_tokens) for the account.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/librato/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, cloud]
+
+ - name: Microsoft Azure Application Insights
+ id: application_insights
+ description: |
+ The Microsoft Azure Application Insights output plugin writes Telegraf metrics to
+ [Application Insights (Microsoft Azure)](https://azure.microsoft.com/en-us/services/application-insights/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/application_insights/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows, cloud, applications]
+
+ - name: Microsoft Azure Monitor
+ id: azure_monitor
+ description: |
+ > The Azure Monitor custom metrics service is currently in preview and not
+ > available in a subset of Azure regions.
+
+ The Microsoft Azure Monitor output plugin sends custom metrics to
+ [Microsoft Azure Monitor](https://azure.microsoft.com/en-us/services/monitor/).
+ Azure Monitor has a metric resolution of one minute. To handle this in Telegraf,
+ the Azure Monitor output plugin automatically aggregates metrics into one minute buckets,
+ which are then sent to Azure Monitor on every flush interval.
+
+ For a Microsoft blog posting on using Telegraf with Microsoft Azure Monitor,
+ see [Collect custom metrics for a Linux VM with the InfluxData Telegraf Agent](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/metrics-store-custom-linux-telegraf).
+
+ The metrics from each input plugin will be written to a separate Azure Monitor namespace,
+ prefixed with `Telegraf/` by default. The field name for each metric is written
+ as the Azure Monitor metric name. All field values are written as a summarized set
+ that includes `min`, `max`, `sum`, and `count`. Tags are written as a dimension
+ on each Azure Monitor metric.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/azure_monitor/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows, cloud]
+
+ - name: MQTT Producer
+ id: mqtt
+ description: |
+ The MQTT Producer output plugin writes to the MQTT server using
+ [supported output data formats](https://docs.influxdata.com/telegraf/latest/data_formats/output/).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/mqtt/README.md
+ introduced: 0.2.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: NATS Output
+ id: nats
+ description: |
+      The NATS output plugin writes to one or more specified NATS instances.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/nats/README.md
+ introduced: 1.1.0
+ tags: [linux, macos, windows, messaging]
+
+ - name: NSQ
+ id: nsq
+ description: |
+ The NSQ output plugin writes to a specified NSQD instance, usually local to the producer.
+ It requires a server name and a topic name.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/nsq/README.md
+ introduced: 0.2.1
+ tags: [linux, macos, windows, messaging]
+
+ - name: OpenTSDB
+ id: opentsdb
+ description: |
+ The OpenTSDB output plugin writes to an OpenTSDB instance using either the telnet or HTTP mode.
+
+ Using the HTTP API is the recommended way of writing metrics since OpenTSDB 2.0.
+ To use HTTP mode, set `useHttp` to true in config. You can also control how many
+ metrics are sent in each HTTP request by setting `batchSize` in config.
+ See the [OpenTSDB documentation](http://opentsdb.net/docs/build/html/api_http/put.html) for details.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/opentsdb/README.md
+ introduced: 0.1.9
+ tags: [linux, macos, windows, data-stores]
+
+ - name: Prometheus Client
+ id: prometheus_client
+ description: |
+      The Prometheus Client output plugin starts a [Prometheus](https://prometheus.io/) client that
+      exposes all metrics on `/metrics` (by default) to be polled by a Prometheus server.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/prometheus_client/README.md
+ introduced: 0.2.1
+ tags: [linux, macos, windows, applications, data-stores]
+
+ - name: Riemann
+ id: riemann
+ description: |
+ The Riemann output plugin writes to [Riemann](http://riemann.io/) using TCP or UDP.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/riemann/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, networking, systems]
+
+ - name: Riemann Legacy
+ id: riemann_legacy
+ description: |
+      The Riemann Legacy output plugin will be deprecated in a future release;
+      see [#1878](https://github.com/influxdata/telegraf/issues/1878) for more details and discussion.
+ link: https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann_legacy
+ introduced: 0.2.3
+ deprecated: 1.3.0
+ tags: [linux, macos, windows, applications]
+
+ - name: Socket Writer
+ id: socket_writer
+ description: |
+ The Socket Writer output plugin writes to a UDP, TCP, or UNIX socket.
+ It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/socket_writer/README.md
+ introduced: 1.3.0
+ tags: [linux, macos, windows, networking]
+
+ - name: Stackdriver
+ id: stackdriver
+ description: |
+ The Stackdriver output plugin writes to the [Google Cloud Stackdriver API](https://cloud.google.com/monitoring/api/v3/)
+ and requires [Google Cloud authentication](https://cloud.google.com/docs/authentication/getting-started)
+      using either a service account or user credentials.
+ For details on pricing, see the [Stackdriver documentation](https://cloud.google.com/stackdriver/pricing).
+
+      Requires a `project` setting to specify where Stackdriver metrics are delivered.
+
+ Metrics are grouped by the `namespace` variable and metric key, for example
+ `custom.googleapis.com/telegraf/system/load5`.
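+
+      A minimal configuration sketch (the project and namespace values are placeholders):
+
+      ```toml
+      [[outputs.stackdriver]]
+        ## Google Cloud project to deliver metrics to
+        project = "my-gcp-project"
+        ## Prefix used to group metric keys
+        namespace = "telegraf"
+      ```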
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/stackdriver/README.md
+ introduced: 1.9.0
+ tags: [linux, macos, windows, cloud]
+
+ - name: Syslog
+ id: syslog
+ description: |
+      The Syslog output plugin sends syslog messages over UDP, TCP, or TLS, with or without octet counting framing.
+ Syslog messages are formatted according to RFC 5424.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/syslog/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows, logging]
+
+ - name: Wavefront
+ id: wavefront
+ description: |
+ The Wavefront output plugin writes to a Wavefront proxy, in Wavefront data format over TCP.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/outputs/wavefront/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows, applications, cloud]
+
+
+# %%%% %%%% %%%% %%%%% %%%%%% %%%% %%%% %%%%%% %%%% %%%%% %%%% #
+# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% #
+# %%%%%% %% %%% %% %%% %%%%% %%%% %% %%% %%%%%% %% %% %% %%%%% %%%% #
+# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% #
+# %% %% %%%% %%%% %% %% %%%%%% %%%% %% %% %% %%%% %% %% %%%% #
+
+aggregator:
+ - name: BasicStats
+ id: basicstats
+ description: |
+      The BasicStats aggregator plugin gives `count`, `max`, `min`, `mean`, `s2` (variance),
+      and `stdev` for a set of values, emitting the aggregate every `period` seconds.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/aggregators/basicstats/README.md
+ introduced: 1.5.0
+ tags: [linux, macos, windows]
+
+ - name: Final
+ id: final
+ description: |
+      The Final aggregator plugin emits the last metric of a contiguous series.
+      A contiguous series is a series that receives updates within the time period defined by `series_timeout`.
+      The contiguous series may be longer than the time interval defined by `period`.
+      This is useful for getting the final value from data sources that produce discrete time series, such as `procstat`, `cgroup`, and `kubernetes`.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/aggregators/final/README.md
+ introduced: 1.11.0
+ tags: [linux, macos, windows]
+
+ - name: Histogram
+ id: histogram
+ description: |
+ The Histogram aggregator plugin creates histograms containing the counts of
+ field values within a range.
+
+ Values added to a bucket are also added to the larger buckets in the distribution.
+ This creates a [cumulative histogram](https://upload.wikimedia.org/wikipedia/commons/5/53/Cumulative_vs_normal_histogram.svg).
+
+      Like other Telegraf aggregator plugins, the metric is emitted every `period` seconds.
+ Bucket counts, however, are not reset between periods and will be non-strictly
+ increasing while Telegraf is running.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/aggregators/histogram/README.md
+ introduced: 1.4.0
+ tags: [linux, macos, windows]
+
+ - name: MinMax
+ id: minmax
+ description: |
+      The MinMax aggregator plugin aggregates `min` and `max` values of each field it sees,
+      emitting the aggregate every `period` seconds.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/aggregators/minmax/README.md
+ introduced: 1.1.0
+ tags: [linux, macos, windows]
+
+ - name: ValueCounter
+ id: valuecounter
+ description: |
+ The ValueCounter aggregator plugin counts the occurrence of values in fields
+      and emits the counter once every `period` seconds.
+
+ A use case for the ValueCounter aggregator plugin is when you are processing
+ an HTTP access log with the [Logparser input plugin](#logparser) and want to
+ count the HTTP status codes.
+
+      The fields to count must be configured with the `fields` configuration directive.
+ When no fields are provided, the plugin will not count any fields.
+ The results are emitted in fields, formatted as `originalfieldname_fieldvalue = count`.
+
+      ValueCounter only works on fields of type `int`, `bool`, or `string`.
+      Float fields are dropped to prevent the creation of too many fields.
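+
+      A configuration sketch that counts the values of a hypothetical `status` field:
+
+      ```toml
+      [[aggregators.valuecounter]]
+        ## Emit counters every 30 seconds and keep the original metrics
+        period = "30s"
+        drop_original = false
+        ## Fields whose values are counted
+        fields = ["status"]
+      ```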
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/aggregators/valuecounter/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows]
+
+
+# %%%%% %%%%% %%%% %%%% %%%%%% %%%% %%%% %%%% %%%%% %%%% #
+# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% #
+# %%%%% %%%%% %% %% %% %%%% %%%% %%%% %% %% %%%%% %%%% #
+# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% #
+# %% %% %% %%%% %%%% %%%%%% %%%% %%%% %%%% %% %% %%%% #
+
+processor:
+ - name: Converter
+ id: converter
+ description: |
+ The Converter processor plugin is used to change the type of tag or field values.
+ In addition to changing field types, it can convert between fields and tags.
+ Values that cannot be converted are dropped.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/converter/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows]
+
+ - name: Date
+ id: date
+ description: |
+      The Date processor plugin adds the metric timestamp as a human-readable tag.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/date/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows]
+
+ - name: Enum
+ id: enum
+ description: |
+ The Enum processor plugin allows the configuration of value mappings for metric fields.
+ The main use case for this is to rewrite status codes such as `red`, `amber`, and `green`
+      with numeric values such as `0`, `1`, and `2`. The plugin supports string and bool types for the field values.
+      Multiple fields can be configured with separate value mappings for each field.
+      Default mapping values can be configured to be used for all values
+      not contained in `value_mappings`.
+      The processor supports explicit configuration of a destination field.
+      By default, the source field is overwritten.
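+
+      A configuration sketch mapping a hypothetical `status` field to numeric codes
+      (the mapping table layout may differ between Telegraf versions):
+
+      ```toml
+      [[processors.enum]]
+        [[processors.enum.mapping]]
+          ## Field to map
+          field = "status"
+          ## Optional destination field; omit to overwrite the source field
+          dest = "status_code"
+          [processors.enum.mapping.value_mappings]
+            green = 0
+            amber = 1
+            red = 2
+      ```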
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/enum/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows]
+
+ - name: Override
+ id: override
+ description: |
+ The Override processor plugin allows overriding all modifications that are supported
+ by input plugins and aggregator plugins:
+
+ - `name_override`
+ - `name_prefix`
+ - `name_suffix`
+ - tags
+
+ All metrics passing through this processor will be modified accordingly.
+ Select the metrics to modify using the standard measurement filtering options.
+
+ Values of `name_override`, `name_prefix`, `name_suffix`, and already present
+ tags with conflicting keys will be overwritten. Absent tags will be created.
+
+      Use cases of this plugin include ensuring certain tags or naming conventions
+      are adhered to, irrespective of input plugin configurations (for example, by `taginclude`).
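+
+      A configuration sketch that prefixes measurement names and adds a static tag
+      (the prefix and tag values are placeholders):
+
+      ```toml
+      [[processors.override]]
+        ## Prefix added to every measurement name
+        name_prefix = "foo_"
+        ## Tags added to (or overwritten on) every metric passing through
+        [processors.override.tags]
+          region = "us-east-1"
+      ```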
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/override/README.md
+ introduced: 1.6.0
+ tags: [linux, macos, windows]
+
+ - name: Parser
+ id: parser
+ description: |
+ The Parser processor plugin parses defined fields containing the specified data
+ format and creates new metrics based on the contents of the field.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/parser/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows]
+
+ - name: Pivot
+ id: pivot
+ description: |
+ The Pivot processor plugin rotates single-valued metrics into a multi-field metric.
+ This transformation often results in data that is easier to use with mathematical operators and comparisons.
+ It also flattens data into a more compact representation for write operations with some output data formats.
+
+ _To perform the reverse operation use the [Unpivot](#unpivot) processor._
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/pivot/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows]
+
+ - name: Printer
+ id: printer
+ description: |
+ The Printer processor plugin simply prints every metric passing through it.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/printer/README.md
+ introduced: 1.1.0
+ tags: [linux, macos, windows]
+
+ - name: Regex
+ id: regex
+ description: |
+ The Regex processor plugin transforms tag and field values using a regular expression (regex) pattern.
+      If the `result_key` parameter is present, it can produce new tags and fields from existing ones.
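+
+      A configuration sketch that groups a hypothetical `resp_code` tag into classes
+      such as `2xx` and stores the result in a new tag:
+
+      ```toml
+      [[processors.regex]]
+        [[processors.regex.tags]]
+          key = "resp_code"
+          pattern = "^(\\d)\\d\\d$"
+          replacement = "${1}xx"
+          ## Store the result in a new tag instead of overwriting resp_code
+          result_key = "resp_code_group"
+      ```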
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/regex/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows]
+
+ - name: Rename
+ id: rename
+ description: |
+ The Rename processor plugin renames InfluxDB measurements, fields, and tags.
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/rename/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows]
+
+ - name: Strings
+ id: strings
+ description: |
+ The Strings processor plugin maps certain Go string functions onto InfluxDB
+ measurement, tag, and field values. Values can be modified in place or stored
+ in another key.
+
+ Implemented functions are:
+
+ - `lowercase`
+ - `uppercase`
+ - `trim`
+ - `trim_left`
+ - `trim_right`
+ - `trim_prefix`
+ - `trim_suffix`
+
+      Note that in this implementation these functions are processed in the order they appear above.
+      You can specify the `measurement`, `tag`, or `field` to process in each
+      section, and optionally a `dest` if you want the result stored in a new tag or field.
+      You can specify many transformations on data with a single Strings processor.
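+
+      A configuration sketch (the tag and field names are placeholders):
+
+      ```toml
+      [[processors.strings]]
+        ## Lowercase the method tag in place
+        [[processors.strings.lowercase]]
+          tag = "method"
+
+        ## Trim a prefix from uri_stem and store the result in a new field
+        [[processors.strings.trim_prefix]]
+          field = "uri_stem"
+          prefix = "/api/"
+          dest = "uri_stem_short"
+      ```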
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/strings/README.md
+ introduced: 1.8.0
+ tags: [linux, macos, windows]
+
+ - name: Tag Limit
+ id: tag_limit
+ description: |
+ The Tag Limit processor plugin preserves only a certain number of tags for any given metric
+ and chooses the tags to preserve when the number of tags appended by the data source is over the limit.
+
+ This can be useful when dealing with output systems (e.g. Stackdriver) that impose
+ hard limits on the number of tags or labels per metric or where high levels of
+ cardinality are computationally or financially expensive.
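+
+      A configuration sketch that keeps at most three tags per metric and always
+      preserves two named tags (the tag names are placeholders):
+
+      ```toml
+      [[processors.tag_limit]]
+        ## Maximum number of tags to preserve
+        limit = 3
+        ## Tags to always preserve
+        keep = ["environment", "region"]
+      ```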
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/tag_limit/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows]
+
+ - name: TopK
+ id: topk
+ description: |
+ The TopK processor plugin is a filter designed to get the top series over a period of time.
+ It can be tweaked to do its top `K` computation over a period of time, so spikes
+ can be smoothed out.
+
+ This processor goes through the following steps when processing a batch of metrics:
+
+ 1. Groups metrics in buckets using their tags and name as key.
+ 2. Aggregates each of the selected fields for each bucket by the selected aggregation function (sum, mean, etc.).
+      3. Orders the buckets by one of the generated aggregations and returns all metrics in the top `K` buckets, then reorders the buckets by the next generated aggregation and returns all metrics in the top `K` buckets, and so on, until it runs out of fields.
+
+ The plugin makes sure not to duplicate metrics.
+
+ Note that depending on the amount of metrics on each computed bucket, more
+ than `K` metrics may be returned.
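+
+      A configuration sketch keeping the top 10 buckets, grouped by host and ranked
+      by the mean of a hypothetical `cpu_usage` field (option names may vary between
+      Telegraf versions):
+
+      ```toml
+      [[processors.topk]]
+        ## Aggregation window in seconds
+        period = 10
+        ## Number of top buckets to keep
+        k = 10
+        ## Tags used to group metrics into buckets
+        group_by = ["host"]
+        ## Fields and aggregation used for ranking
+        fields = ["cpu_usage"]
+        aggregation = "mean"
+      ```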
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/topk/README.md
+ introduced: 1.7.0
+ tags: [linux, macos, windows]
+
+ - name: Unpivot
+ id: unpivot
+ description: |
+ The Unpivot processor plugin rotates a multi-field series into single-valued metrics.
+ This transformation often results in data that is easier to aggregate across fields.
+
+ _To perform the reverse operation use the [Pivot](#pivot) processor._
+ link: https://github.com/influxdata/telegraf/blob/master/plugins/processors/unpivot/README.md
+ introduced: 1.12.0
+ tags: [linux, macos, windows]
diff --git a/data/versions.yaml b/data/versions.yaml
index 0c28bd5ba..74d5f45a4 100644
--- a/data/versions.yaml
+++ b/data/versions.yaml
@@ -1 +1,2 @@
stable_version: v2.0
+telegraf_version: 1.12.0
diff --git a/deploy/docs-website.yml b/deploy/docs-website.yml
index b2fc0a607..5a1c1b64b 100644
--- a/deploy/docs-website.yml
+++ b/deploy/docs-website.yml
@@ -156,55 +156,69 @@ Resources:
Description: Lambda function performing request URI rewriting.
Code:
ZipFile: |
- const config = {
- suffix: '.html',
- appendToDirs: 'index.html',
- removeTrailingSlash: false,
- };
+ 'use strict';
- const regexSuffixless = /\/[a-z0-9]+([0-9\.]+)?$/; // e.g. "/some/page" but not "/", "/some/" or "/some.jpg"
- const regexTrailingSlash = /.+\/$/; // e.g. "/some/" or "/some/page/" but not root "/"
-
- exports.handler = function handler(event, context, callback) {
+ exports.handler = (event, context, callback) => {
const { request } = event.Records[0].cf;
- const { uri } = request;
- const { suffix, appendToDirs, removeTrailingSlash } = config;
+ const { uri, headers, origin } = request;
+ const extension = uri.substr(uri.lastIndexOf('.') + 1);
- // Append ".html" to origin request
- if (suffix && uri.match(regexSuffixless)) {
- request.uri = uri + suffix;
- callback(null, request);
- return;
+ const validExtensions = ['.html', '.css', '.js', '.xml', '.png', '.jpg', '.svg', '.json', '.csv', '.rb', '.otf', '.eot', '.ttf', '.woff'];
+ const indexPath = 'index.html';
+      const defaultPath = '/v2.0/';
+
+ // If path ends with '/', then append 'index.html', otherwise redirect to a
+ // path with '/' or ignore if the path ends with a valid file extension.
+ if ((uri == '/') || (uri.length < defaultPath.length)) {
+ callback(null, {
+ status: '302',
+ statusDescription: 'Found',
+ headers: {
+ location: [{
+ key: 'Location',
+ value: defaultPath,
+ }],
+ }
+ });
+ } else if (uri.endsWith('/')) {
+ request.uri = uri + indexPath;
+ } else if (uri.endsWith('/index.html')) {
+ callback(null, {
+ status: '302',
+ statusDescription: 'Found',
+ headers: {
+ location: [{
+ key: 'Location',
+ value: uri.substr(0, uri.length - indexPath.length),
+ }],
+ }
+ });
+      } else if (validExtensions.filter((ext) => uri.endsWith(ext)).length == 0) {
+ callback(null, {
+ status: '302',
+ statusDescription: 'Found',
+ headers: {
+ location: [{
+ key: 'Location',
+ value: uri + '/',
+ }],
+ }
+ });
}
- // Append "index.html" to origin request
- if (appendToDirs && uri.match(regexTrailingSlash)) {
- request.uri = uri + appendToDirs;
- callback(null, request);
- return;
- }
+ const pathsV1 = ['/influxdb', '/telegraf', '/chronograf', '/kapacitor', '/enterprise_influxdb', '/enterprise_kapacitor'];
+ const originV1 = process.env.ORIGIN_V1;
- // Redirect (301) non-root requests ending in "/" to URI without trailing slash
- if (removeTrailingSlash && uri.match(/.+\/$/)) {
- const response = {
- // body: '',
- // bodyEncoding: 'text',
- headers: {
- 'location': [{
- key: 'Location',
- value: uri.slice(0, -1)
- }]
- },
- status: '301',
- statusDescription: 'Moved Permanently'
- };
- callback(null, response);
- return;
+ // Send to v1 origin if start of path matches
+      if (pathsV1.filter((path) => uri.startsWith(path)).length > 0) {
+ headers['host'] = [{key: 'host', value: originV1}];
+ origin.s3.domainName = originV1;
}
// If nothing matches, return request unchanged
callback(null, request);
};
+
Handler: index.handler
MemorySize: 128
Role: !Sub ${DocsOriginRequestRewriteLambdaRole.Arn}
diff --git a/layouts/_default/api.html b/layouts/_default/api.html
new file mode 100644
index 000000000..68a09ae66
--- /dev/null
+++ b/layouts/_default/api.html
@@ -0,0 +1 @@
+{{ .Content }}
diff --git a/layouts/_default/version-landing.html b/layouts/_default/version-landing.html
index db50091d7..92bc61314 100644
--- a/layouts/_default/version-landing.html
+++ b/layouts/_default/version-landing.html
@@ -6,35 +6,44 @@
- To provide feedback or report a bug, send an email to cloudbeta@influxdata.com.
+ The following resources are available when you need help with {{ $cloudNameShort }}: