diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b3f3243f0..cf22fe34d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -241,6 +241,15 @@ Truncated markdown content here.
{{% /truncate %}}
```
+### Generate a list of children articles
+Section landing pages often contain just a list of articles with links and descriptions for each.
+This can be cumbersome to maintain as content is added.
+To automate the listing of articles in a section, use the `{{< children >}}` shortcode.
+
+```md
+{{< children >}}
+```
+
### Reference content
The InfluxDB documentation is "task-based," meaning content primarily focuses on
what a user is **doing**, not what they are **using**.
diff --git a/assets/js/content-interactions.js b/assets/js/content-interactions.js
index e6b312ac8..4e02a7125 100644
--- a/assets/js/content-interactions.js
+++ b/assets/js/content-interactions.js
@@ -14,7 +14,8 @@ $(".article--content h2, \
var elementWhiteList = [
".tabs p a",
".code-tabs p a",
- ".truncate-toggle"
+ ".truncate-toggle",
+ ".children-links a"
]
$('.article a[href^="#"]:not(' + elementWhiteList + ')').click(function (e) {
diff --git a/assets/styles/layouts/_layout-article.scss b/assets/styles/layouts/_layout-article.scss
index 7ff7dbe05..75b5e52cd 100644
--- a/assets/styles/layouts/_layout-article.scss
+++ b/assets/styles/layouts/_layout-article.scss
@@ -142,7 +142,7 @@
color: $article-code;
}
- p, li, table{
+ p,li,table,h2,h3,h4,h5,h6 {
code {
padding: .15rem .45rem .25rem;
border-radius: $border-radius;
@@ -240,6 +240,26 @@
}
}
+ ///////////////////////// Landing Page Article Links /////////////////////////
+
+ .children-links {
+ h2,h3,h4 {
+ margin-top: -.5rem;
+
+      a:after {
+ content: "\e919";
+ font-family: "icomoon";
+ color: rgba($article-heading, .35);
+ vertical-align: bottom;
+ transition: color .2s;
+ margin-left: .4rem;
+ }
+ a:hover {
+ &:after { color: $article-link; }
+ }
+ }
+ }
+
////////////////// Blockquotes, Notes, Warnings, & Messages //////////////////
blockquote,
@@ -547,7 +567,7 @@
}
}
- ///////////////////////////////// Scroll Bars //////////////////////////////////
+ //////////////////////////////// Scroll Bars /////////////////////////////////
pre { @include scrollbar($article-code-bg, $article-code-scrollbar); }
table { @include scrollbar($article-table-row-alt, $article-code-scrollbar);}
@@ -560,6 +580,48 @@
pre { @include scrollbar($article-warn-code-bg, $article-warn-code-scrollbar); }
table { @include scrollbar($article-warn-table-row-alt, $article-warn-code-scrollbar); }
}
+
+ ////////////////////////// Guides Pagination Buttons /////////////////////////
+
+ .page-nav-btns {
+ display: flex;
+ justify-content: space-between;
+ margin: 3rem 0 1rem;
+
+ .btn {
+ display: flex;
+ max-width: 49%;
+ color: $article-btn-text;
+ background: $article-btn;
+ border-radius: $border-radius;
+ text-align: center;
+ align-items: center;
+ &:hover {
+ background: $article-btn-hover;
+ }
+
+ &.prev{
+ margin-right: auto;
+ padding: .75rem 1.25rem .75rem .75rem;
+ &:before {
+ content: "\e90a";
+ font-family: "icomoon";
+ margin-right: .5rem;
+ vertical-align: middle;
+ }
+ }
+ &.next {
+ margin-left: auto;
+ padding: .75rem .75rem .75rem 1.25rem;
+ &:after {
+ content: "\e90c";
+ font-family: "icomoon";
+ margin-left: .5rem;
+ vertical-align: middle;
+ }
+ }
+ }
+ }
}
diff --git a/assets/styles/themes/_theme-dark.scss b/assets/styles/themes/_theme-dark.scss
index 2577a5746..227b14417 100644
--- a/assets/styles/themes/_theme-dark.scss
+++ b/assets/styles/themes/_theme-dark.scss
@@ -116,6 +116,12 @@ $article-tab-code-text-hover: $g20-white !default;
$article-tab-code-bg-hover: $b-ocean !default;
$article-tab-code-active-text: $g20-white !default;
+// Article page buttons
+$article-btn: $b-ocean !default;
+$article-btn-text: $g20-white !default;
+$article-btn-hover: $b-pool !default;
+$article-btn-text-hover: $g20-white !default;
+
// Left Navigation
$nav-category: $b-ocean !default;
$nav-category-hover: $g20-white !default;
diff --git a/assets/styles/themes/_theme-light.scss b/assets/styles/themes/_theme-light.scss
index 289f0050b..e352dd786 100644
--- a/assets/styles/themes/_theme-light.scss
+++ b/assets/styles/themes/_theme-light.scss
@@ -115,6 +115,12 @@ $article-tab-code-text-hover: $g20-white;
$article-tab-code-bg-hover: $p-comet;
$article-tab-code-active-text: $p-star;
+// Article page buttons
+$article-btn: $b-pool;
+$article-btn-text: $g20-white;
+$article-btn-hover: $b-ocean;
+$article-btn-text-hover: $g20-white;
+
// Left Navigation
$nav-category: $b-ocean;
$nav-category-hover: $gr-viridian;
diff --git a/content/v2.0/example.md b/content/v2.0/example.md
index e1a64c78a..25eca418a 100644
--- a/content/v2.0/example.md
+++ b/content/v2.0/example.md
@@ -7,6 +7,7 @@ menu:
weight: 1
#enterprise_all: true
enterprise_some: true
+draft: true
---
This is a paragraph. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc rutrum, metus id scelerisque euismod, erat ante suscipit nibh, ac congue enim risus id est. Etiam tristique nisi et tristique auctor. Morbi eu bibendum erat. Sed ullamcorper, dui id lobortis efficitur, mauris odio pharetra neque, vel tempor odio dolor blandit justo.
diff --git a/content/v2.0/query-data/_index.md b/content/v2.0/query-data/_index.md
new file mode 100644
index 000000000..10886830e
--- /dev/null
+++ b/content/v2.0/query-data/_index.md
@@ -0,0 +1,16 @@
+---
+title: Query data in InfluxDB
+seotitle: Query data stored in InfluxDB
+description: >
+ Learn to query data stored in InfluxDB using Flux and tools such as the InfluxDB
+ user interface and the 'influx' command line interface.
+menu:
+ v2_0:
+ name: Query data
+ weight: 2
+---
+
+Learn to query data stored in InfluxDB using Flux and tools such as the InfluxDB
+user interface and the 'influx' command line interface.
+
+{{< children >}}
diff --git a/content/v2.0/query-data/execute-queries.md b/content/v2.0/query-data/execute-queries.md
new file mode 100644
index 000000000..43ec6c883
--- /dev/null
+++ b/content/v2.0/query-data/execute-queries.md
@@ -0,0 +1,90 @@
+---
+title: Execute queries
+seotitle: Different ways to query InfluxDB
+description: There are multiple ways to query data from InfluxDB including the InfluxDB UI, CLI, and API.
+menu:
+ v2_0:
+ name: Execute queries
+ parent: Query data
+ weight: 2
+---
+
+There are multiple ways to execute queries with InfluxDB.
+This guide covers the different options:
+
+1. [Data Explorer](#data-explorer)
+2. [Influx REPL](#influx-repl)
+3. [Influx query command](#influx-query-command)
+4. [InfluxDB API](#influxdb-api)
+
+## Data Explorer
+Queries can be built, executed, and visualized in InfluxDB UI's Data Explorer.
+
+
+
+## Influx REPL
+The [`influx repl` command](/v2.0/reference/cli/influx/repl) starts an interactive
+read-eval-print-loop (REPL) where you can write and execute Flux queries.
+
+```bash
+influx repl --org org-name
+```
+
+_**Note:** `ctrl-d` will close the REPL._
+
+## Influx query command
+You can pass queries to the [`influx query` command](/v2.0/reference/cli/influx/query)
+as either a file or raw Flux via stdin.
+
+###### Run a query from a file
+```bash
+influx query @/path/to/query.flux
+```
+
+###### Pass raw Flux via stdin pipe
+```bash
+influx query - # Return to open the pipe
+
+data = from(bucket: "example-bucket") |> range(start: -10m) # ...
+# ctrl-d to close the pipe and submit the query
+```
+
+## InfluxDB API
+Query InfluxDB through the `/api/v2/query` endpoint.
+Queried data is returned in annotated CSV format.
+
+In your request, set the following:
+
+- `Authorization` header to `Token ` + your authentication token.
+- `accept` header to `application/csv`
+- `content-type` header to `application/vnd.flux`
+
+This allows you to POST the Flux query in plain text and receive the annotated CSV response.
+
+Below is an example `curl` command that queries InfluxDB:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Multi-line](#)
+[Single-line](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```bash
+curl http://localhost:9999/api/v2/query -XPOST -sS \
+-H 'Authorization: Token YOURAUTHTOKEN' \
+-H 'accept:application/csv' \
+-H 'content-type:application/vnd.flux' \
+-d 'from(bucket:"test")
+  |> range(start:-1000h)
+  |> group(columns:["_measurement"], mode:"by")
+  |> sum()'
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```bash
+curl http://localhost:9999/api/v2/query -XPOST -sS -H 'Authorization: Token TOKENSTRINGHERE' -H 'accept:application/csv' -H 'content-type:application/vnd.flux' -d 'from(bucket:"test") |> range(start:-1000h) |> group(columns:["_measurement"], mode:"by") |> sum()'
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
diff --git a/content/v2.0/query-data/get-started/_index.md b/content/v2.0/query-data/get-started/_index.md
new file mode 100644
index 000000000..aaf1c5888
--- /dev/null
+++ b/content/v2.0/query-data/get-started/_index.md
@@ -0,0 +1,78 @@
+---
+title: Get started with Flux
+description: >
+ Get started with Flux, InfluxData's functional data scripting language.
+  This step-by-step guide walks through the basics of writing a Flux query.
+menu:
+ v2_0:
+ name: Get started with Flux
+ parent: Query data
+ weight: 1
+---
+
+Flux is InfluxData's functional data scripting language designed for querying,
+analyzing, and acting on data.
+
+This multi-part getting started guide walks through important concepts related to Flux,
+how to query time series data from InfluxDB using Flux, and introduces Flux syntax and functions.
+
+## Flux design principles
+Flux is designed to be usable, readable, flexible, composable, testable, contributable, and shareable.
+Its syntax is largely inspired by [2018's most popular scripting language](https://insights.stackoverflow.com/survey/2018#technology),
+Javascript, and takes a functional approach to data exploration and processing.
+
+The following example illustrates querying data stored from the last hour,
+filtering by the `cpu` measurement and the `cpu=cpu-total` tag, windowing the data in 1 minute intervals,
+and calculating the average of each window:
+
+```js
+from(bucket:"example-bucket")
+ |> range(start:-1h)
+ |> filter(fn:(r) =>
+ r._measurement == "cpu" and
+ r.cpu == "cpu-total"
+ )
+ |> aggregateWindow(every: 1m, fn: mean)
+```
+
+## Key concepts
+Flux introduces important new concepts you should understand as you get started.
+
+### Pipe-forward operator
+Flux uses pipe-forward operators (`|>`) extensively to chain operations together.
+After each function or operation, Flux returns a table or collection of tables containing data.
+The pipe-forward operator pipes those tables into the next function or operation where
+they are further processed or manipulated.
+
+### Tables
+Flux structures all data in tables.
+When data is streamed from data sources, Flux formats it as annotated
+comma-separated values (CSV), representing tables.
+Functions then manipulate or process them and output new tables.
+
+#### Group keys
+Every table has a **group key** which describes the contents of the table.
+It's a list of columns for which every row in the table will have the same value.
+Columns with unique values in each row are **not** part of the group key.
+
+As functions process and transform data, each modifies the group keys of output tables.
+Understanding how tables and group keys are modified by functions is key to properly
+shaping your data for the desired output.
+
+###### Example group key
+```js
+[_start, _stop, _field, _measurement, host]
+```
+
+Note that `_time` and `_value` are excluded from the example group key because they
+are unique to each row.
+
+## Tools for working with Flux
+
+The [Execute queries](/v2.0/query-data/execute-queries) guide walks through
+the different tools available for querying InfluxDB with Flux.
+
+
diff --git a/content/v2.0/query-data/get-started/query-influxdb.md b/content/v2.0/query-data/get-started/query-influxdb.md
new file mode 100644
index 000000000..16ee0c5d7
--- /dev/null
+++ b/content/v2.0/query-data/get-started/query-influxdb.md
@@ -0,0 +1,130 @@
+---
+title: Query InfluxDB with Flux
+description: Learn the basics of using Flux to query data from InfluxDB.
+menu:
+ v2_0:
+ name: Query InfluxDB
+ parent: Get started with Flux
+ weight: 1
+---
+
+This guide walks through the basics of using Flux to query data from InfluxDB.
+Every Flux query needs the following:
+
+1. [A data source](#1-define-your-data-source)
+2. [A time range](#2-specify-a-time-range)
+3. [Data filters](#3-filter-your-data)
+
+
+## 1. Define your data source
+Flux's [`from()`](/v2.0/reference/flux/functions/inputs/from) function defines an InfluxDB data source.
+It requires a [`bucket`](/v2.0/reference/flux/functions/inputs/from#bucket) parameter.
+The following examples use `example-bucket` as the bucket name.
+
+```js
+from(bucket:"example-bucket")
+```
+
+## 2. Specify a time range
+Flux requires a time range when querying time series data.
+"Unbounded" queries are very resource-intensive and as a protective measure,
+Flux will not query the database without a specified range.
+
+Use the pipe-forward operator (`|>`) to pipe data from your data source into the [`range()`](/v2.0/reference/flux/functions/transformations/range)
+function, which specifies a time range for your query.
+It accepts two properties: `start` and `stop`.
+Ranges can be **relative** using negative [durations](/v2.0/reference/flux/language/lexical-elements#duration-literals)
+or **absolute** using [timestamps](/v2.0/reference/flux/language/lexical-elements#date-and-time-literals).
+
+###### Example relative time ranges
+```js
+// Relative time range with start only. Stop defaults to now.
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+
+// Relative time range with start and stop
+from(bucket:"example-bucket")
+ |> range(start: -1h, stop: -10m)
+```
+
+{{% note %}}
+Relative ranges are relative to "now."
+{{% /note %}}
+
+###### Example absolute time range
+```js
+from(bucket:"example-bucket")
+ |> range(start: 2018-11-05T23:30:00Z, stop: 2018-11-06T00:00:00Z)
+```
+
+#### Use the following:
+For this guide, use the relative time range, `-15m`, to limit query results to data from the last 15 minutes:
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -15m)
+```
+
+## 3. Filter your data
+Pass your ranged data into the `filter()` function to narrow results based on data attributes or columns.
+The `filter()` function has one parameter, `fn`, which expects an anonymous function
+with logic that filters data based on columns or attributes.
+
+Flux's anonymous function syntax is similar to Javascript's.
+Records or rows are passed into the `filter()` function as an object (`r`).
+The anonymous function takes the object and evaluates it to see if it matches the defined filters.
+Use the `and` relational operator to chain multiple filters.
+
+```js
+// Pattern
+(r) => (r.objectProperty comparisonOperator comparisonExpression)
+
+// Example with single filter
+(r) => (r._measurement == "cpu")
+
+// Example with multiple filters
+(r) => (r._measurement == "cpu") and (r._field != "usage_system" )
+```
+
+#### Use the following:
+For this example, filter by the `cpu` measurement, the `usage_system` field, and the `cpu-total` tag value:
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -15m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+```
+
+## 4. Yield your queried data
+Use Flux's `yield()` function to output the filtered tables as the result of the query.
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -15m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> yield()
+```
+
+{{% note %}}
+Flux automatically assumes a `yield()` function at
+the end of each script in order to output and visualize the data.
+`yield()` is only necessary when including multiple queries in the same Flux query.
+Each set of returned data needs to be named using the `yield()` function.
+{{% /note %}}
+
+## Congratulations!
+You have now queried data from InfluxDB using Flux.
+This is a barebones query that can be transformed in other ways.
+
+
diff --git a/content/v2.0/query-data/get-started/syntax-basics.md b/content/v2.0/query-data/get-started/syntax-basics.md
new file mode 100644
index 000000000..9bc285a9c
--- /dev/null
+++ b/content/v2.0/query-data/get-started/syntax-basics.md
@@ -0,0 +1,217 @@
+---
+title: Flux syntax basics
+description: An introduction to the basic elements of the Flux syntax with real-world application examples.
+menu:
+ v2_0:
+ name: Syntax basics
+ parent: Get started with Flux
+ weight: 3
+---
+
+
+Flux, at its core, is a scripting language designed specifically for working with data.
+This guide walks through a handful of simple expressions and how they are handled in Flux.
+
+## Use the influx CLI's REPL
+Use the `influx repl` command to open the interactive read-eval-print-loop (REPL).
+Run the commands provided in this guide in the REPL.
+
+##### Start in the influx CLI in Flux mode
+```bash
+influx repl --org org-name
+```
+
+## Basic Flux syntax
+The code blocks below provide commands that illustrate the basic syntax of Flux.
+Run these commands in the REPL.
+
+### Simple expressions
+Flux is a scripting language that supports basic expressions.
+For example, simple addition:
+
+```js
+> 1 + 1
+2
+```
+
+### Variables
+Assign an expression to a variable using the assignment operator, `=`.
+
+```js
+> s = "this is a string"
+> i = 1 // an integer
+> f = 2.0 // a floating point number
+```
+
+Type the name of a variable to print its value:
+
+```js
+> s
+this is a string
+> i
+1
+> f
+2
+```
+
+### Objects
+Flux also supports objects. Each value in an object can be a different data type.
+
+```js
+> o = {name:"Jim", age: 42}
+```
+
+Use dot notation to access a property of an object:
+
+```js
+> o.name
+Jim
+> o.age
+42
+```
+
+### Lists
+Flux supports lists. List values must be the same type.
+
+```js
+> n = 4
+> l = [1,2,3,n]
+> l
+[1, 2, 3, 4]
+```
+
+### Functions
+Flux uses functions for most of its heavy lifting.
+Below is a simple function that squares a number, `n`.
+
+```js
+> square = (n) => n * n
+> square(n:3)
+9
+```
+
+{{% note %}}
+Flux does not support positional arguments or parameters.
+Parameters must always be named when calling a function.
+{{% /note %}}
+
+### Pipe-forward operator
+Flux uses the pipe-forward operator (`|>`) extensively to chain operations together.
+After each function or operation, Flux returns a table or collection of tables containing data.
+The pipe-forward operator pipes those tables into the next function where they are further processed or manipulated.
+
+```js
+data |> someFunction() |> anotherFunction()
+```
+
+## Real-world application of basic syntax
+This likely seems familiar if you've already been through the other
+[getting started guides](/v2.0/query-data/get-started).
+Flux's syntax is inspired by Javascript and other functional scripting languages.
+As you begin to apply these basic principles in real-world use cases such as creating data stream variables,
+custom functions, etc., the power of Flux and its ability to query and process data will become apparent.
+
+The examples below provide both multi-line and single-line versions of each input command.
+Carriage returns in Flux aren't necessary, but do help with readability.
+Both single- and multi-line commands can be copied and pasted into the `influx` CLI running in Flux mode.
+
+### Define data stream variables
+A common use case for variable assignments in Flux is creating variables for one
+or more input data streams.
+
+{{< code-tabs-wrapper >}}
+ {{% code-tabs %}}
+ [Multi-line](#)
+ [Single-line](#)
+ {{% /code-tabs %}}
+{{% code-tab-content %}}
+```js
+timeRange = -1h
+
+cpuUsageUser =
+ from(bucket:"example-bucket")
+ |> range(start: timeRange)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_user" and
+ r.cpu == "cpu-total"
+ )
+
+memUsagePercent =
+ from(bucket:"example-bucket")
+ |> range(start: timeRange)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```js
+timeRange = -1h
+cpuUsageUser = from(bucket:"example-bucket") |> range(start: timeRange) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "usage_user" and r.cpu == "cpu-total")
+memUsagePercent = from(bucket:"example-bucket") |> range(start: timeRange) |> filter(fn: (r) => r._measurement == "mem" and r._field == "used_percent")
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper>}}
+
+These variables can be used in other functions, such as `join()`, while keeping the syntax minimal and flexible.
+
+### Define custom functions
+Create a function that returns the `n` rows in the input stream with the highest `_value`s.
+To do this, pass the input stream (`tables`) and the number of results to return (`n`) into a custom function.
+Then use Flux's `sort()` and `limit()` functions to find the top `n` results in the data set.
+
+{{< code-tabs-wrapper >}}
+ {{% code-tabs %}}
+ [Multi-line](#)
+ [Single-line](#)
+ {{% /code-tabs %}}
+{{% code-tab-content %}}
+```js
+topN = (tables=<-, n) =>
+ tables
+ |> sort(desc: true)
+ |> limit(n: n)
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```js
+topN = (tables=<-, n) => tables |> sort(desc: true) |> limit(n: n)
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+_More information about creating custom functions is available in the [Custom functions](/v2.0/query-data/guides/custom-functions) documentation._
+
+Using the `cpuUsageUser` data stream variable defined above, find the top five data
+points with the custom `topN` function and yield the results.
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[Multi-line](#)
+[Single-line](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```js
+cpuUsageUser
+ |> topN(n:5)
+ |> yield()
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```js
+cpuUsageUser |> topN(n:5) |> yield()
+```
+{{% /code-tab-content %}}
+
+{{< /code-tabs-wrapper>}}
+
+This query will return the five data points with the highest user CPU usage over the last hour.
+
+
diff --git a/content/v2.0/query-data/get-started/transform-data.md b/content/v2.0/query-data/get-started/transform-data.md
new file mode 100644
index 000000000..0fe5a4654
--- /dev/null
+++ b/content/v2.0/query-data/get-started/transform-data.md
@@ -0,0 +1,176 @@
+---
+title: Transform data with Flux
+description: Learn the basics of using Flux to transform data queried from InfluxDB.
+menu:
+ v2_0:
+ name: Transform data
+ parent: Get started with Flux
+ weight: 2
+---
+
+When [querying data from InfluxDB](/v2.0/query-data/get-started/query-influxdb),
+you often need to transform that data in some way.
+Common examples are aggregating data into averages, downsampling data, etc.
+
+This guide demonstrates using [Flux functions](/v2.0/reference/flux/functions) to transform your data.
+It walks through creating a Flux script that partitions data into windows of time,
+averages the `_value`s in each window, and outputs the averages as a new table.
+
+It's important to understand how the "shape" of your data changes through each of these operations.
+
+## Query data
+Use the query built in the previous [Query data from InfluxDB](/v2.0/query-data/get-started/query-influxdb)
+guide, but update the range to pull data from the last hour:
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+```
+
+## Flux functions
+Flux provides a number of functions that perform specific operations, transformations, and tasks.
+You can also [create custom functions](/v2.0/query-data/guides/custom-functions) in your Flux queries.
+_Functions are covered in detail in the [Flux functions](/v2.0/reference/flux/functions) documentation._
+
+A common type of function used when transforming data queried from InfluxDB is an aggregate function.
+Aggregate functions take a set of `_value`s in a table, aggregate them, and transform
+them into a new value.
+
+This example uses the [`mean()` function](/v2.0/reference/flux/functions/transformations/aggregates/mean)
+to average values within each time window.
+
+{{% note %}}
+The following example walks through the steps required to window and aggregate data,
+but there is an [`aggregateWindow()` helper function](#helper-functions) that does it for you.
+It's just good to understand the steps in the process.
+{{% /note %}}
+
+## Window your data
+Flux's [`window()` function](/v2.0/reference/flux/functions/transformations/window) partitions records based on a time value.
+Use the `every` parameter to define a duration of each window.
+
+For this example, window data in five minute intervals (`5m`).
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> window(every: 5m)
+```
+
+As data is gathered into windows of time, each window is output as its own table.
+When visualized, each table is assigned a unique color.
+
+
+
+## Aggregate windowed data
+Flux aggregate functions take the `_value`s in each table and aggregate them in some way.
+Use the [`mean()` function](/v2.0/reference/flux/functions/transformations/aggregates/mean) to average the `_value`s of each table.
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> window(every: 5m)
+ |> mean()
+```
+
+As rows in each window are aggregated, their output table contains only a single row with the aggregate value.
+Windowed tables are all still separate and, when visualized, will appear as single, unconnected points.
+
+
+
+## Add times to your aggregates
+As values are aggregated, the resulting tables do not have a `_time` column because
+the records used for the aggregation all have different timestamps.
+Aggregate functions don't infer what time should be used for the aggregate value.
+Therefore the `_time` column is dropped.
+
+A `_time` column is required in the [next operation](#unwindow-aggregate-tables).
+To add one, use the [`duplicate()` function](/v2.0/reference/flux/functions/transformations/duplicate)
+to duplicate the `_stop` column as the `_time` column for each windowed table.
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> window(every: 5m)
+ |> mean()
+ |> duplicate(column: "_stop", as: "_time")
+```
+
+## Unwindow aggregate tables
+
+Use the `window()` function with the `every: inf` parameter to gather all points
+into a single, infinite window.
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> window(every: 5m)
+ |> mean()
+ |> duplicate(column: "_stop", as: "_time")
+ |> window(every: inf)
+```
+
+Once ungrouped and combined into a single table, the aggregate data points will appear connected in your visualization.
+
+
+
+## Helper functions
+This may seem like a lot of coding just to build a query that aggregates data, however going through the
+process helps to understand how data changes "shape" as it is passed through each function.
+
+Flux provides (and allows you to create) "helper" functions that abstract many of these steps.
+The same operation performed in this guide can be accomplished using the
+[`aggregateWindow()` function](/v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow).
+
+```js
+from(bucket:"example-bucket")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> aggregateWindow(every: 5m, fn: mean)
+```
+
+## Congratulations!
+You have now constructed a Flux query that uses Flux functions to transform your data.
+There are many more ways to manipulate your data using both Flux's primitive functions
+and your own custom functions, but this is a good introduction into the basic syntax and query structure.
+
+---
+
+_For a deeper dive into windowing and aggregating data with example data output for each transformation,
+view the [Window and aggregate data](/v2.0/query-data/guides/window-aggregate) guide._
+
+---
+
+
diff --git a/content/v2.0/query-data/guides/_index.md b/content/v2.0/query-data/guides/_index.md
new file mode 100644
index 000000000..c72e45394
--- /dev/null
+++ b/content/v2.0/query-data/guides/_index.md
@@ -0,0 +1,13 @@
+---
+title: Flux how-to guides
+description: Helpful guides that walk through both common and complex tasks and use cases for Flux.
+menu:
+ v2_0:
+ name: How-to guides
+ parent: Query data
+ weight: 3
+---
+
+The following guides walk through common query use cases.
+
+{{< children >}}
diff --git a/content/v2.0/query-data/guides/custom-functions.md b/content/v2.0/query-data/guides/custom-functions.md
new file mode 100644
index 000000000..5f214f5c8
--- /dev/null
+++ b/content/v2.0/query-data/guides/custom-functions.md
@@ -0,0 +1,137 @@
+---
+title: Create custom Flux functions
+seotitle: Create custom Flux functions
+description: Create your own custom Flux functions to transform and manipulate data.
+menu:
+ v2_0:
+ name: Create custom functions
+ parent: How-to guides
+ weight: 8
+---
+
+Flux's functional syntax allows for custom functions.
+This guide walks through the basics of creating your own function.
+
+## Function definition structure
+The basic structure for defining functions in Flux is as follows:
+
+```js
+// Basic function definition structure
+functionName = (functionParameters) => functionOperations
+```
+
+##### functionName
+The name used to call the function in your Flux script.
+
+##### functionParameters
+A comma-separated list of parameters passed into the function and used in its operations.
+[Parameter defaults](#define-parameter-defaults) can be defined for each.
+
+##### functionOperations
+Operations and functions that manipulate the input into the desired output.
+
+#### Basic function examples
+
+###### Example square function
+```js
+// Function definition
+square = (n) => n * n
+
+// Function usage
+> square(n:3)
+9
+```
+
+###### Example multiply function
+```js
+// Function definition
+multiply = (x, y) => x * y
+
+// Function usage
+> multiply(x:2, y:15)
+30
+```
+
+## Functions that manipulate piped-forward data
+Most Flux functions manipulate data piped-forward into the function.
+In order for a custom function to process piped-forward data, one of the function
+parameters must capture the input tables using the `<-` pipe-receive expression.
+
+In the example below, the `tables` parameter is assigned to the `<-` expression,
+which represents all data piped-forward into the function.
+`tables` is then piped-forward into other operations in the function definition.
+
+```js
+functionName = (tables=<-) => tables |> functionOperations
+```
+
+#### Pipe-forwardable function example
+
+###### Multiply row values by x
+The example below defines a `multByX` function that multiplies the `_value` column
+of each row in the input table by the `x` parameter.
+It uses the [`map()` function](/v2.0/reference/flux/functions/transformations/map)
+to modify each `_value`.
+
+```js
+// Function definition
+multByX = (tables=<-, x) =>
+ tables
+ |> map(fn: (r) => r._value * x)
+
+// Function usage
+from(bucket: "telegraf/autogen")
+ |> range(start: -1m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> multByX(x:2.0)
+```
+
+## Define parameter defaults
+Use the `=` assignment operator to assign a default value to function parameters
+in your function definition:
+
+```js
+functionName = (param1=defaultValue1, param2=defaultValue2) => functionOperation
+```
+
+Defaults are overridden by explicitly defining the parameter in the function call.
+
+#### Example functions with defaults
+
+###### Get the winner or the "winner"
+The example below defines a `getWinner` function that returns the record with the highest
+or lowest `_value` (winner versus "winner") depending on the `noSarcasm` parameter which defaults to `true`.
+It uses the [`sort()` function](/v2.0/reference/flux/functions/transformations/sort)
+to sort records in either descending or ascending order.
+It then uses the [`limit()` function](/v2.0/reference/flux/functions/transformations/limit)
+to return the first record from the sorted table.
+
+```js
+// Function definition
+getWinner = (tables=<-, noSarcasm=true) =>
+ tables
+ |> sort(desc: noSarcasm)
+ |> limit(n:1)
+
+// Function usage
+// Get the winner
+from(bucket: "telegraf/autogen")
+ |> range(start: -1m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> getWinner()
+
+// Get the "winner"
+from(bucket: "telegraf/autogen")
+ |> range(start: -1m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> getWinner(noSarcasm: false)
+```
diff --git a/content/v2.0/query-data/guides/group-data.md b/content/v2.0/query-data/guides/group-data.md
new file mode 100644
index 000000000..d185d9ea4
--- /dev/null
+++ b/content/v2.0/query-data/guides/group-data.md
@@ -0,0 +1,667 @@
+---
+title: Group data with Flux
+seotitle: How to group data with Flux
+description: >
+ This guide walks through grouping data with Flux by providing examples and
+ illustrating how data is shaped throughout the process.
+menu:
+ v2_0:
+ name: Group data
+ parent: How-to guides
+ weight: 3
+---
+
+With Flux, you can group data by any column in your queried data set.
+"Grouping" partitions data into tables in which each row shares a common value for specified columns.
+This guide walks through grouping data in Flux and provides examples of how data is shaped in the process.
+
+## Group keys
+Every table has a **group key** – a list of columns for which every row in the table has the same value.
+
+###### Example group key
+```js
+[_start, _stop, _field, _measurement, host]
+```
+
+Grouping data in Flux is essentially defining the group key of output tables.
+Understanding how modifying group keys shapes output data is key to successfully
+grouping and transforming data into your desired output.
+
+## group() Function
+Flux's [`group()` function](/v2.0/reference/flux/functions/transformations/group) defines the
+group key for output tables, i.e. grouping records based on values for specific columns.
+
+###### group() example
+```js
+dataStream
+ |> group(columns: ["cpu", "host"])
+```
+
+###### Resulting group key
+```js
+[cpu, host]
+```
+
+The `group()` function has the following parameters:
+
+### columns
+The list of columns to include or exclude (depending on the [mode](#mode)) in the grouping operation.
+
+### mode
+The method used to define the group and resulting group key.
+Possible values include `by` and `except`.
+
+
+## Example grouping operations
+To illustrate how grouping works, define a `dataSet` variable that queries System
+CPU usage from the `telegraf/autogen` bucket.
+Filter the `cpu` tag so it only returns results for each numbered CPU core.
+
+### Data set
+CPU used by system operations for all numbered CPU cores.
+It uses a regular expression to filter only numbered cores.
+
+```js
+dataSet = from(bucket: "telegraf/autogen")
+ |> range(start: -2m)
+ |> filter(fn: (r) =>
+ r._field == "usage_system" and
+    r.cpu =~ /cpu[0-9]/
+ )
+ |> drop(columns: ["host"])
+```
+
+{{% note %}}
+This example drops the `host` column from the returned data since the CPU data
+is only tracked for a single host and it simplifies the output tables.
+Don't drop the `host` column if monitoring multiple hosts.
+{{% /note %}}
+
+{{% truncate %}}
+```
+Table: keys: [_start, _stop, _field, _measurement, cpu]
+ _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:00.000000000Z 7.892107892107892
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:10.000000000Z 7.2
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:20.000000000Z 7.4
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:30.000000000Z 5.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:40.000000000Z 7.4
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:34:50.000000000Z 7.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:00.000000000Z 10.3
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:10.000000000Z 9.2
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:20.000000000Z 8.4
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:30.000000000Z 8.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:40.000000000Z 8.6
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:35:50.000000000Z 10.2
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu0 2018-11-05T21:36:00.000000000Z 10.6
+
+Table: keys: [_start, _stop, _field, _measurement, cpu]
+ _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:00.000000000Z 0.7992007992007992
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:10.000000000Z 0.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:20.000000000Z 0.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:30.000000000Z 0.4
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:40.000000000Z 0.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:34:50.000000000Z 0.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:00.000000000Z 1.4
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:10.000000000Z 1.2
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:20.000000000Z 0.8
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:30.000000000Z 0.8991008991008991
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:40.000000000Z 0.8008008008008008
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:35:50.000000000Z 0.999000999000999
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu1 2018-11-05T21:36:00.000000000Z 1.1022044088176353
+
+Table: keys: [_start, _stop, _field, _measurement, cpu]
+ _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:00.000000000Z 4.1
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:10.000000000Z 3.6
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:20.000000000Z 3.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:30.000000000Z 2.6
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:40.000000000Z 4.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:34:50.000000000Z 4.895104895104895
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:00.000000000Z 6.906906906906907
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:10.000000000Z 5.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:20.000000000Z 5.1
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:30.000000000Z 4.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:40.000000000Z 5.1
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:35:50.000000000Z 5.9
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu2 2018-11-05T21:36:00.000000000Z 6.4935064935064934
+
+Table: keys: [_start, _stop, _field, _measurement, cpu]
+ _start:time _stop:time _field:string _measurement:string cpu:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:00.000000000Z 0.5005005005005005
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:10.000000000Z 0.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:20.000000000Z 0.5
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:30.000000000Z 0.3
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:40.000000000Z 0.6
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:34:50.000000000Z 0.6
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:00.000000000Z 1.3986013986013985
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:10.000000000Z 0.9
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:20.000000000Z 0.5005005005005005
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:30.000000000Z 0.7
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:40.000000000Z 0.6
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:35:50.000000000Z 0.8
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z usage_system cpu cpu3 2018-11-05T21:36:00.000000000Z 0.9
+```
+{{% /truncate %}}
+
+**Note that the group key is output with each table: `Table: keys: `.**
+
+
+
+### Group by CPU
+Group the `dataSet` stream by the `cpu` column.
+
+```js
+dataSet
+ |> group(columns: ["cpu"])
+```
+
+This won't actually change the structure of the data since it already has `cpu`
+in the group key and is therefore grouped by `cpu`.
+However, notice that it does change the group key:
+
+{{% truncate %}}
+###### Group by CPU output tables
+```
+Table: keys: [cpu]
+ cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time
+---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 7.892107892107892 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 7.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 5.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 7.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 10.3 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 9.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 8.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 8.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 8.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 10.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu0 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [cpu]
+ cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time
+---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 0.7992007992007992 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 0.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 1.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 1.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 0.8991008991008991 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 0.8008008008008008 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 0.999000999000999 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu1 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.1022044088176353 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [cpu]
+ cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time
+---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 4.1 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 3.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 3.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 2.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 4.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 4.895104895104895 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 6.906906906906907 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 5.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 4.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 5.9 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu2 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 6.4935064935064934 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [cpu]
+ cpu:string _stop:time _time:time _value:float _field:string _measurement:string _start:time
+---------------------- ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:10.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:20.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:30.000000000Z 0.3 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:40.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:50.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:00.000000000Z 1.3986013986013985 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:10.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:20.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:30.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:40.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:35:50.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z
+ cpu3 2018-11-05T21:36:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z
+```
+{{% /truncate %}}
+
+The visualization remains the same.
+
+
+
+### Group by time
+Grouping data by the `_time` column is a good illustration of how grouping changes the structure of your data.
+
+```js
+dataSet
+ |> group(columns: ["_time"])
+```
+
+When grouping by `_time`, all records that share a common `_time` value are grouped into individual tables.
+So each output table represents a single point in time.
+
+{{% truncate %}}
+###### Group by time output tables
+```
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.892107892107892 usage_system cpu cpu0
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7992007992007992 usage_system cpu cpu1
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.1 usage_system cpu cpu2
+2018-11-05T21:34:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.2 usage_system cpu cpu0
+2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1
+2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 3.6 usage_system cpu cpu2
+2018-11-05T21:34:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu cpu0
+2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1
+2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 3.5 usage_system cpu cpu2
+2018-11-05T21:34:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.5 usage_system cpu cpu0
+2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.4 usage_system cpu cpu1
+2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 2.6 usage_system cpu cpu2
+2018-11-05T21:34:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.3 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu cpu0
+2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1
+2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.5 usage_system cpu cpu2
+2018-11-05T21:34:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 7.5 usage_system cpu cpu0
+2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu1
+2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.895104895104895 usage_system cpu cpu2
+2018-11-05T21:34:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.3 usage_system cpu cpu0
+2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.4 usage_system cpu cpu1
+2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 6.906906906906907 usage_system cpu cpu2
+2018-11-05T21:35:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.3986013986013985 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 9.2 usage_system cpu cpu0
+2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.2 usage_system cpu cpu1
+2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.7 usage_system cpu cpu2
+2018-11-05T21:35:10.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 8.4 usage_system cpu cpu0
+2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu cpu1
+2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu cpu2
+2018-11-05T21:35:20.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 8.5 usage_system cpu cpu0
+2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8991008991008991 usage_system cpu cpu1
+2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 4.7 usage_system cpu cpu2
+2018-11-05T21:35:30.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 8.6 usage_system cpu cpu0
+2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8008008008008008 usage_system cpu cpu1
+2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu cpu2
+2018-11-05T21:35:40.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.2 usage_system cpu cpu0
+2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.999000999000999 usage_system cpu cpu1
+2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 5.9 usage_system cpu cpu2
+2018-11-05T21:35:50.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu cpu3
+
+Table: keys: [_time]
+ _time:time _start:time _stop:time _value:float _field:string _measurement:string cpu:string
+------------------------------ ------------------------------ ------------------------------ ---------------------------- ---------------------- ---------------------- ----------------------
+2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 10.6 usage_system cpu cpu0
+2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 1.1022044088176353 usage_system cpu cpu1
+2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 6.4935064935064934 usage_system cpu cpu2
+2018-11-05T21:36:00.000000000Z 2018-11-05T21:34:00.000000000Z 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu cpu3
+```
+{{% /truncate %}}
+
+Because each timestamp is structured as a separate table, when visualized, records appear as individual, unconnected points.
+Even though there are multiple records per timestamp, only the last record of each group's table is visualized.
+
+
+
+{{% note %}}
+With some further processing, you could calculate the average CPU usage across all CPUs per point
+in time and group them into a single table, but we won't cover that in this example.
+If you're interested in running and visualizing this yourself, here's what the query would look like:
+
+```js
+dataSet
+ |> group(columns: ["_time"])
+ |> mean()
+ |> group(columns: ["_value", "_time"], mode: "except")
+```
+{{% /note %}}
+
+### Group by CPU and time
+Group by the `cpu` and `_time` columns.
+
+```js
+dataSet
+ |> group(columns: ["cpu", "_time"])
+```
+
+This outputs a table for every unique `cpu` and `_time` combination:
+
+{{% truncate %}}
+###### Group by CPU and time output tables
+```
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:00.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.892107892107892 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:00.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7992007992007992 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:00.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.1 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:00.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:10.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:10.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:10.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 3.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:10.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:20.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:20.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:20.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 3.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:20.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:30.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 5.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:30.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:30.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 2.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:30.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.3 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:40.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:40.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:40.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:40.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:50.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 7.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:50.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:50.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.895104895104895 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:34:50.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:00.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 10.3 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:00.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 1.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:00.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 6.906906906906907 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:00.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 1.3986013986013985 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:10.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 9.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:10.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 1.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:10.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:10.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:20.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 8.4 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:20.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:20.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:20.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.5005005005005005 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:30.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 8.5 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:30.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.8991008991008991 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:30.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 4.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:30.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.7 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:40.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 8.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:40.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.8008008008008008 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:40.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.1 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:40.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:50.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 10.2 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:50.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 0.999000999000999 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:50.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 5.9 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:35:50.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.8 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:36:00.000000000Z cpu0 2018-11-05T21:36:00.000000000Z 10.6 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:36:00.000000000Z cpu1 2018-11-05T21:36:00.000000000Z 1.1022044088176353 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:36:00.000000000Z cpu2 2018-11-05T21:36:00.000000000Z 6.4935064935064934 usage_system cpu 2018-11-05T21:34:00.000000000Z
+
+Table: keys: [_time, cpu]
+ _time:time cpu:string _stop:time _value:float _field:string _measurement:string _start:time
+------------------------------ ---------------------- ------------------------------ ---------------------------- ---------------------- ---------------------- ------------------------------
+2018-11-05T21:36:00.000000000Z cpu3 2018-11-05T21:36:00.000000000Z 0.9 usage_system cpu 2018-11-05T21:34:00.000000000Z
+```
+{{% /truncate %}}
+
+When visualized, tables appear as individual, unconnected points.
+
+
+
+Grouping by `cpu` and `_time` is a good illustration of how grouping works.
+
+## In conclusion
+Grouping is a powerful way to shape your data into your desired output format.
+It modifies the group keys of output tables, grouping records into tables that
+all share common values within specified columns.
diff --git a/content/v2.0/query-data/guides/histograms.md b/content/v2.0/query-data/guides/histograms.md
new file mode 100644
index 000000000..398c23384
--- /dev/null
+++ b/content/v2.0/query-data/guides/histograms.md
@@ -0,0 +1,141 @@
+---
+title: Create histograms with Flux
+seotitle: How to create histograms with Flux
+description: This guide walks through using the histogram() function to create cumulative histograms with Flux.
+menu:
+ v2_0:
+ name: Create histograms
+ parent: How-to guides
+ weight: 7
+---
+
+
+Histograms provide valuable insight into the distribution of your data.
+This guide walks through using Flux's `histogram()` function to transform your data into a **cumulative histogram**.
+
+## histogram() function
+The [`histogram()` function](/v2.0/reference/flux/functions/transformations/histogram) approximates the
+cumulative distribution of a dataset by counting data frequencies for a list of "bins."
+A **bin** is simply a range in which a data point falls.
+All data points that are less than or equal to the bound are counted in the bin.
+In the histogram output, a column is added (`le`) that represents the upper bounds of each bin.
+Bin counts are cumulative.
+
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> histogram(bins: [0.0, 10.0, 20.0, 30.0])
+```
+
+{{% note %}}
+Values output by the `histogram` function represent points of data aggregated over time.
+Since values do not represent single points in time, there is no `_time` column in the output table.
+{{% /note %}}
+
+## Bin helper functions
+Flux provides two helper functions for generating histogram bins.
+Each generates an array of floats designed to be used in the `histogram()` function's `bins` parameter.
+
+### linearBins()
+The [`linearBins()` function](/v2.0/reference/flux/functions/misc/linearbins) generates a list of linearly separated floats.
+
+```js
+linearBins(start: 0.0, width: 10.0, count: 10)
+
+// Generated list: [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, +Inf]
+```
+
+### logarithmicBins()
+The [`logarithmicBins()` function](/v2.0/reference/flux/functions/misc/logarithmicbins) generates a list of exponentially separated floats.
+
+```js
+logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinity: true)
+
+// Generated list: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, +Inf]
+```
+
+## Examples
+
+### Generating a histogram with linear bins
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> histogram(
+ bins: linearBins(
+ start:65.5,
+ width: 0.5,
+ count: 20,
+ infinity:false
+ )
+ )
+```
+
+###### Output table
+```
+Table: keys: [_start, _stop, _field, _measurement, host]
+ _start:time _stop:time _field:string _measurement:string host:string le:float _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------ ---------------------------- ----------------------------
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 65.5 5
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 66 6
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 66.5 8
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 67 9
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 67.5 9
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 68 10
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 68.5 12
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 69 12
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 69.5 15
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 70 23
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 70.5 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 71 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 71.5 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 72 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 72.5 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 73 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 73.5 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 74 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 74.5 30
+2018-11-07T22:19:58.423358000Z 2018-11-07T22:24:58.423358000Z used_percent mem Scotts-MacBook-Pro.local 75 30
+```
+
+### Generating a histogram with logarithmic bins
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> histogram(
+ bins: logarithmicBins(
+ start:0.5,
+ factor: 2.0,
+ count: 10,
+ infinity:false
+ )
+ )
+```
+
+###### Output table
+```
+Table: keys: [_start, _stop, _field, _measurement, host]
+ _start:time _stop:time _field:string _measurement:string host:string le:float _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------ ---------------------------- ----------------------------
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 0.5 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 1 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 2 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 4 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 8 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 16 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 32 0
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 64 2
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 128 30
+2018-11-07T22:23:36.860664000Z 2018-11-07T22:28:36.860664000Z used_percent mem Scotts-MacBook-Pro.local 256 30
+```
diff --git a/content/v2.0/query-data/guides/join.md b/content/v2.0/query-data/guides/join.md
new file mode 100644
index 000000000..3ed02b59d
--- /dev/null
+++ b/content/v2.0/query-data/guides/join.md
@@ -0,0 +1,302 @@
+---
+title: Join data with Flux
+seotitle: How to join data with Flux
+description: This guide walks through joining data with Flux and outlines how it shapes your data in the process.
+menu:
+ v2_0:
+ name: Join data
+ parent: How-to guides
+ weight: 5
+---
+
+The [`join()` function](/v2.0/reference/flux/functions/transformations/join) merges two or more
+input streams, whose values are equal on a set of common columns, into a single output stream.
+Flux allows you to join on any columns common between two data streams and opens the door
+for operations such as cross-measurement joins and math across measurements.
+
+To illustrate a join operation, use data captured by Telegraf and stored in
+InfluxDB - memory usage and processes.
+
+In this guide, we'll join two data streams, one representing memory usage and the other representing the
+total number of running processes, then calculate the average memory usage per running process.
+
+## Define stream variables
+In order to perform a join, you must have two streams of data.
+Assign a variable to each data stream.
+
+### Memory used variable
+Define a `memUsed` variable that filters on the `mem` measurement and the `used` field.
+This returns the amount of memory (in bytes) used.
+
+###### memUsed stream definition
+```js
+memUsed = from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+```
+
+{{% truncate %}}
+###### memUsed data output
+```
+Table: keys: [_start, _stop, _field, _measurement, host]
+ _start:time _stop:time _field:string _measurement:string host:string _time:time _value:int
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------ ------------------------------ --------------------------
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:50:00.000000000Z 10956333056
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:50:10.000000000Z 11014008832
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:50:20.000000000Z 11373428736
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:50:30.000000000Z 11001421824
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:50:40.000000000Z 10985852928
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:50:50.000000000Z 10992279552
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:51:00.000000000Z 11053568000
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:51:10.000000000Z 11092242432
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:51:20.000000000Z 11612774400
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:51:30.000000000Z 11131961344
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:51:40.000000000Z 11124805632
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:51:50.000000000Z 11332464640
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:52:00.000000000Z 11176923136
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:52:10.000000000Z 11181068288
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:52:20.000000000Z 11182579712
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:52:30.000000000Z 11238862848
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:52:40.000000000Z 11275296768
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:52:50.000000000Z 11225411584
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:53:00.000000000Z 11252690944
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:53:10.000000000Z 11227029504
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:53:20.000000000Z 11201646592
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:53:30.000000000Z 11227897856
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:53:40.000000000Z 11330428928
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:53:50.000000000Z 11347976192
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:54:00.000000000Z 11368271872
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:54:10.000000000Z 11269623808
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:54:20.000000000Z 11295637504
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:54:30.000000000Z 11354423296
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:54:40.000000000Z 11379687424
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:54:50.000000000Z 11248926720
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z used mem host1.local 2018-11-06T05:55:00.000000000Z 11292524544
+```
+{{% /truncate %}}
+
+### Total processes variable
+Define a `procTotal` variable that filters on the `processes` measurement and the `total` field.
+This returns the number of running processes.
+
+###### procTotal stream definition
+```js
+procTotal = from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "processes" and
+ r._field == "total"
+ )
+```
+
+{{% truncate %}}
+###### procTotal data output
+```
+Table: keys: [_start, _stop, _field, _measurement, host]
+ _start:time _stop:time _field:string _measurement:string host:string _time:time _value:int
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------ ------------------------------ --------------------------
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:50:00.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:50:10.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:50:20.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:50:30.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:50:40.000000000Z 469
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:50:50.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:51:00.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:51:10.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:51:20.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:51:30.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:51:40.000000000Z 469
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:51:50.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:52:00.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:52:10.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:52:20.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:52:30.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:52:40.000000000Z 472
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:52:50.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:53:00.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:53:10.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:53:20.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:53:30.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:53:40.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:53:50.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:54:00.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:54:10.000000000Z 470
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:54:20.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:54:30.000000000Z 473
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:54:40.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:54:50.000000000Z 471
+2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z total processes host1.local 2018-11-06T05:55:00.000000000Z 471
+```
+{{% /truncate %}}
+
+## Join the two data streams
+With the two data streams defined, use the `join()` function to join them together.
+`join()` requires two parameters:
+
+##### `tables`
+A map of tables to join with keys by which they will be aliased.
+In the example below, `mem` is the alias for `memUsed` and `proc` is the alias for `procTotal`.
+
+##### `on`
+An array of strings defining the columns on which the tables will be joined.
+_**Both tables must have all columns specified in this list.**_
+
+```js
+join(
+ tables: {mem:memUsed, proc:procTotal},
+ on: ["_time", "_stop", "_start", "host"]
+)
+```
+
+{{% truncate %}}
+###### Joined output table
+```
+Table: keys: [_field_mem, _field_proc, _measurement_mem, _measurement_proc, _start, _stop, host]
+ _field_mem:string _field_proc:string _measurement_mem:string _measurement_proc:string _start:time _stop:time host:string _time:time _value_mem:int _value_proc:int
+---------------------- ---------------------- ----------------------- ------------------------ ------------------------------ ------------------------------ ------------------------ ------------------------------ -------------------------- --------------------------
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:00.000000000Z 10956333056 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:10.000000000Z 11014008832 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:20.000000000Z 11373428736 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:30.000000000Z 11001421824 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:40.000000000Z 10985852928 469
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:50.000000000Z 10992279552 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:00.000000000Z 11053568000 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:10.000000000Z 11092242432 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:20.000000000Z 11612774400 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:30.000000000Z 11131961344 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:40.000000000Z 11124805632 469
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:50.000000000Z 11332464640 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:00.000000000Z 11176923136 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:10.000000000Z 11181068288 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:20.000000000Z 11182579712 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:30.000000000Z 11238862848 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:40.000000000Z 11275296768 472
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:50.000000000Z 11225411584 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:00.000000000Z 11252690944 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:10.000000000Z 11227029504 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:20.000000000Z 11201646592 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:30.000000000Z 11227897856 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:40.000000000Z 11330428928 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:50.000000000Z 11347976192 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:00.000000000Z 11368271872 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:10.000000000Z 11269623808 470
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:20.000000000Z 11295637504 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:30.000000000Z 11354423296 473
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:40.000000000Z 11379687424 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:50.000000000Z 11248926720 471
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:55:00.000000000Z 11292524544 471
+```
+{{% /truncate %}}
+
+Notice the output table includes the following columns:
+
+- `_field_mem`
+- `_field_proc`
+- `_measurement_mem`
+- `_measurement_proc`
+- `_value_mem`
+- `_value_proc`
+
+These represent the columns with values unique to the two input tables.
+
+## Calculate and create a new table
+With the two streams of data joined into a single table, use the
+[`map()` function](/v2.0/reference/flux/functions/transformations/map)
+to build a new table by mapping the existing `_time` column to a new `_time`
+column and dividing `_value_mem` by `_value_proc` and mapping it to a
+new `_value` column.
+
+```js
+join(tables: {mem:memUsed, proc:procTotal}, on: ["_time", "_stop", "_start", "host"])
+ |> map(fn: (r) => ({
+ _time: r._time,
+ _value: r._value_mem / r._value_proc
+ }))
+```
+
+{{% truncate %}}
+###### Mapped table
+```
+Table: keys: [_field_mem, _field_proc, _measurement_mem, _measurement_proc, _start, _stop, host]
+ _field_mem:string _field_proc:string _measurement_mem:string _measurement_proc:string _start:time _stop:time host:string _time:time _value:int
+---------------------- ---------------------- ----------------------- ------------------------ ------------------------------ ------------------------------ ------------------------ ------------------------------ --------------------------
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:00.000000000Z 23311346
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:10.000000000Z 23434061
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:20.000000000Z 24147407
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:30.000000000Z 23407280
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:40.000000000Z 23423993
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:50:50.000000000Z 23338173
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:00.000000000Z 23518229
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:10.000000000Z 23600515
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:20.000000000Z 24708030
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:30.000000000Z 23685024
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:40.000000000Z 23720267
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:51:50.000000000Z 24060434
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:00.000000000Z 23730197
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:10.000000000Z 23789506
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:20.000000000Z 23792722
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:30.000000000Z 23861704
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:40.000000000Z 23888340
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:52:50.000000000Z 23833145
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:00.000000000Z 23941895
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:10.000000000Z 23887296
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:20.000000000Z 23833290
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:30.000000000Z 23838424
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:40.000000000Z 24056112
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:53:50.000000000Z 24093367
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:00.000000000Z 24136458
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:10.000000000Z 23977922
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:20.000000000Z 23982245
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:30.000000000Z 24005123
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:40.000000000Z 24160695
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:54:50.000000000Z 23883071
+ used total mem processes 2018-11-06T05:50:00.000000000Z 2018-11-06T05:55:00.000000000Z Scotts-MacBook-Pro.local 2018-11-06T05:55:00.000000000Z 23975635
+```
+{{% /truncate %}}
+
+This table represents the average amount of memory in bytes per running process.
+
+
+## Real world example
+The following function calculates the batch sizes written to an InfluxDB cluster by joining
+fields from `httpd` and `write` measurements in order to compare `pointReq` and `writeReq`.
+The results are grouped by cluster ID so you can make comparisons across clusters.
+
+```js
+batchSize = (cluster_id, start=-1m, interval=10s) => {
+ httpd = from(bucket:"telegraf")
+ |> range(start:start)
+ |> filter(fn:(r) =>
+ r._measurement == "influxdb_httpd" and
+ r._field == "writeReq" and
+ r.cluster_id == cluster_id
+ )
+ |> aggregateWindow(every: interval, fn: mean)
+ |> derivative(nonNegative:true,unit:60s)
+
+ write = from(bucket:"telegraf")
+ |> range(start:start)
+ |> filter(fn:(r) =>
+ r._measurement == "influxdb_write" and
+ r._field == "pointReq" and
+ r.cluster_id == cluster_id
+ )
+ |> aggregateWindow(every: interval, fn: max)
+ |> derivative(nonNegative:true,unit:60s)
+
+ return join(
+ tables:{httpd:httpd, write:write},
+ on:["_time","_stop","_start","host"]
+ )
+ |> map(fn:(r) => ({
+ _time: r._time,
+ _value: r._value_httpd / r._value_write,
+ }))
+ |> group(columns: cluster_id)
+}
+
+batchSize(cluster_id: "enter cluster id here")
+```
diff --git a/content/v2.0/query-data/guides/regular-expressions.md b/content/v2.0/query-data/guides/regular-expressions.md
new file mode 100644
index 000000000..5918b0f9e
--- /dev/null
+++ b/content/v2.0/query-data/guides/regular-expressions.md
@@ -0,0 +1,85 @@
+---
+title: Use regular expressions in Flux
+seotitle: How to use regular expressions in Flux
+description: This guide walks through using regular expressions in evaluation logic in Flux functions.
+menu:
+ v2_0:
+ name: Use regular expressions
+ parent: How-to guides
+ weight: 9
+---
+
+Regular expressions (regexes) are incredibly powerful when matching patterns in large collections of data.
+With Flux, regular expressions are primarily used for evaluation logic in predicate functions for things
+such as filtering rows, dropping and keeping columns, state detection, etc.
+This guide shows how to use regular expressions in your Flux scripts.
+
+## Go regular expression syntax
+Flux uses Go's [regexp package](https://golang.org/pkg/regexp/) for regular expression search.
+The links [below](#helpful-links) provide information about Go's regular expression syntax.
+
+## Regular expression operators
+Flux provides two comparison operators for use with regular expressions.
+
+#### `=~`
+When the expression on the left **MATCHES** the regular expression on the right, this evaluates to `true`.
+
+#### `!~`
+When the expression on the left **DOES NOT MATCH** the regular expression on the right, this evaluates to `true`.
+
+## Regular expressions in Flux
+When using regex matching in your Flux scripts, enclose your regular expressions with `/`.
+The following is the basic regex comparison syntax:
+
+###### Basic regex comparison syntax
+```js
+expression =~ /regex/
+expression !~ /regex/
+```
+## Examples
+
+### Use a regex to filter by tag value
+The following example filters records by the `cpu` tag.
+It only keeps records for which the `cpu` is either `cpu0`, `cpu1`, or `cpu2`.
+
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -15m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_user" and
+ r.cpu =~ /cpu[0-2]/
+ )
+```
+
+### Use a regex to filter by field key
+The following example excludes records that do not have `_percent` in a field key.
+
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -15m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field =~ /_percent/
+ )
+```
+
+### Drop columns matching a regex
+The following example drops columns whose names do not begin with `_`.
+
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -15m)
+ |> filter(fn: (r) => r._measurement == "mem")
+ |> drop(fn: (column) => column !~ /_.*/)
+```
+
+## Helpful links
+
+##### Syntax documentation
+[regexp Syntax GoDoc](https://godoc.org/regexp/syntax)
+[RE2 Syntax Overview](https://github.com/google/re2/wiki/Syntax)
+
+##### Go regex testers
+[Regex Tester - Golang](https://regex-golang.appspot.com/assets/html/index.html)
+[Regex101](https://regex101.com/)
diff --git a/content/v2.0/query-data/guides/sort-limit.md b/content/v2.0/query-data/guides/sort-limit.md
new file mode 100644
index 000000000..7256f54aa
--- /dev/null
+++ b/content/v2.0/query-data/guides/sort-limit.md
@@ -0,0 +1,56 @@
+---
+title: Sort and limit data with Flux
+seotitle: How to sort and limit data with Flux
+description: >
+ This guide walks through sorting and limiting data with Flux and outlines how
+ it shapes your data in the process.
+menu:
+ v2_0:
+ name: Sort and limit data
+ parent: How-to guides
+ weight: 6
+---
+
+The [`sort()` function](/v2.0/reference/flux/functions/transformations/sort)
+orders the records within each table.
+The following example orders system uptime first by region, then host, then value.
+
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h)
+ |> filter(fn: (r) =>
+ r._measurement == "system" and
+ r._field == "uptime"
+ )
+ |> sort(columns:["region", "host", "_value"])
+```
+
+The [`limit()` function](/v2.0/reference/flux/functions/transformations/limit)
+limits the number of records in output tables to a fixed number, `n`.
+The following example shows up to 10 records from the past hour.
+
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> limit(n:10)
+```
+
+You can use `sort()` and `limit()` together to show the top N records.
+The example below returns the 10 top system uptime values sorted first by
+region, then host, then value.
+
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h)
+ |> filter(fn: (r) =>
+ r._measurement == "system" and
+ r._field == "uptime"
+ )
+ |> sort(columns:["region", "host", "_value"])
+ |> limit(n:10)
+```
+
+You now have created a Flux query that sorts and limits data.
+Flux also provides the [`top()`](/v2.0/reference/flux/functions/transformations/selectors/top)
+and [`bottom()`](/v2.0/reference/flux/functions/transformations/selectors/bottom)
+functions to perform both of these operations at the same time.
diff --git a/content/v2.0/query-data/guides/window-aggregate.md b/content/v2.0/query-data/guides/window-aggregate.md
new file mode 100644
index 000000000..82d30feca
--- /dev/null
+++ b/content/v2.0/query-data/guides/window-aggregate.md
@@ -0,0 +1,340 @@
+---
+title: Window and aggregate data with Flux
+seotitle: How to window and aggregate data with Flux
+description: >
+ This guide walks through windowing and aggregating data with Flux and outlines
+ how it shapes your data in the process.
+menu:
+ v2_0:
+ name: Window and aggregate data
+ parent: How-to guides
+ weight: 2
+---
+
+A common operation performed with time series data is grouping data into windows of time,
+or "windowing" data, then aggregating windowed values into a new value.
+This guide walks through windowing and aggregating data with Flux and demonstrates
+how data is shaped in the process.
+
+{{% note %}}
+The following example is an in-depth walk-through of the steps required to window and aggregate data.
+The [`aggregateWindow()` function](#summing-up) performs these operations for you, but understanding
+how data is shaped in the process helps to successfully create your desired output.
+{{% /note %}}
+
+## Data set
+For the purposes of this guide, define a variable that represents your base data set.
+The following example queries the memory usage of the host machine.
+
+```js
+dataSet = from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> drop(columns: ["host"])
+```
+
+{{% note %}}
+This example drops the `host` column from the returned data since the memory data
+is only tracked for a single host and it simplifies the output tables.
+Dropping the `host` column is optional and not recommended if monitoring memory
+on multiple hosts.
+{{% /note %}}
+
+`dataSet` can now be used to represent your base data, which will look similar to the following:
+
+{{% truncate %}}
+```
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:00.000000000Z 71.11611366271973
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:10.000000000Z 67.39630699157715
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:20.000000000Z 64.16666507720947
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:30.000000000Z 64.19951915740967
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:40.000000000Z 64.2122745513916
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:50:50.000000000Z 64.22209739685059
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 64.6336555480957
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:10.000000000Z 64.16516304016113
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:20.000000000Z 64.18349742889404
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:30.000000000Z 64.20474052429199
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:40.000000000Z 68.65062713623047
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:50.000000000Z 67.20139980316162
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 70.9143877029419
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:10.000000000Z 64.14549350738525
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:20.000000000Z 64.15379047393799
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:30.000000000Z 64.1592264175415
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:40.000000000Z 64.18190002441406
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:50.000000000Z 64.28837776184082
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 64.29731845855713
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:10.000000000Z 64.36963081359863
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:20.000000000Z 64.37397003173828
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:30.000000000Z 64.44413661956787
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:40.000000000Z 64.42906856536865
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:50.000000000Z 64.44573402404785
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.48912620544434
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:10.000000000Z 64.49522972106934
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:20.000000000Z 64.48652744293213
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:30.000000000Z 64.49949741363525
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:40.000000000Z 64.4949197769165
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:50.000000000Z 64.49787616729736
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229
+```
+{{% /truncate %}}
+
+## Windowing data
+Use the [`window()` function](/v2.0/reference/flux/functions/transformations/window)
+to group your data based on time bounds.
+The most common parameter passed with the `window()` function is `every`, which
+defines the duration of time between windows.
+Other parameters are available, but for this example, window the base data
+set into one minute windows.
+
+```js
+dataSet
+ |> window(every: 1m)
+```
+
+Each window of time is output in its own table containing all records that fall within the window.
+
+{{% truncate %}}
+###### window() output tables
+```
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:00.000000000Z 71.11611366271973
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:10.000000000Z 67.39630699157715
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:20.000000000Z 64.16666507720947
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:30.000000000Z 64.19951915740967
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:40.000000000Z 64.2122745513916
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:50:50.000000000Z 64.22209739685059
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 64.6336555480957
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:10.000000000Z 64.16516304016113
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:20.000000000Z 64.18349742889404
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:30.000000000Z 64.20474052429199
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:40.000000000Z 68.65062713623047
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:51:50.000000000Z 67.20139980316162
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 70.9143877029419
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:10.000000000Z 64.14549350738525
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:20.000000000Z 64.15379047393799
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:30.000000000Z 64.1592264175415
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:40.000000000Z 64.18190002441406
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:52:50.000000000Z 64.28837776184082
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 64.29731845855713
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:10.000000000Z 64.36963081359863
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:20.000000000Z 64.37397003173828
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:30.000000000Z 64.44413661956787
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:40.000000000Z 64.42906856536865
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:53:50.000000000Z 64.44573402404785
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.48912620544434
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:10.000000000Z 64.49522972106934
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:20.000000000Z 64.48652744293213
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:30.000000000Z 64.49949741363525
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:40.000000000Z 64.4949197769165
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:50.000000000Z 64.49787616729736
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:55:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229
+```
+{{% /truncate %}}
+
+When visualized in the InfluxDB UI, each window table is displayed in a different color.
+
+
+
+## Aggregate data
+[Aggregate functions](/v2.0/reference/flux/functions/transformations/aggregates) take the values
+of all rows in a table and use them to perform an aggregate operation.
+The result is output as a new value in a single-row table.
+
+Since windowed data is split into separate tables, aggregate operations run against
+each table separately and output new tables containing only the aggregated value.
+
+For this example, use the [`mean()` function](/v2.0/reference/flux/functions/transformations/aggregates/mean)
+to output the average of each window:
+
+```js
+dataSet
+ |> window(every: 1m)
+ |> mean()
+```
+
+{{% truncate %}}
+###### mean() output tables
+```
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ----------------------------
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 65.88549613952637
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ----------------------------
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 65.50651391347249
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ----------------------------
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 65.30719598134358
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ----------------------------
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 64.39330975214641
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ----------------------------
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 64.49386278788249
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ----------------------------
+2018-11-03T17:55:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 64.49816226959229
+```
+{{% /truncate %}}
+
+Because each data point is contained in its own table, when visualized,
+they appear as single, unconnected points.
+
+
+
+### Recreate the time column
+**Notice the `_time` column is not in the [aggregated output tables](#mean-output-tables).**
+Because records in each table are aggregated together, their timestamps no longer
+apply and the column is removed from the group key and table.
+
+Also notice the `_start` and `_stop` columns still exist.
+These represent the lower and upper bounds of the time window.
+
+Many Flux functions rely on the `_time` column.
+To further process your data after an aggregate function, you need to re-add `_time`.
+Use the [`duplicate()` function](/v2.0/reference/flux/functions/transformations/duplicate) to
+duplicate either the `_start` or `_stop` column as a new `_time` column.
+
+```js
+dataSet
+ |> window(every: 1m)
+ |> mean()
+ |> duplicate(column: "_stop", as: "_time")
+```
+
+{{% truncate %}}
+###### duplicate() output tables
+```
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:51:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 65.88549613952637
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:51:00.000000000Z 2018-11-03T17:52:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 65.50651391347249
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:52:00.000000000Z 2018-11-03T17:53:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 65.30719598134358
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:53:00.000000000Z 2018-11-03T17:54:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.39330975214641
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:54:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49386278788249
+
+
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:55:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229
+```
+{{% /truncate %}}
+
+## "Unwindow" aggregate tables
+Keeping aggregate values in separate tables generally isn't the format in which you want your data.
+Use the `window()` function to "unwindow" your data into a single infinite (`inf`) window.
+
+```js
+dataSet
+ |> window(every: 1m)
+ |> mean()
+ |> duplicate(column: "_stop", as: "_time")
+ |> window(every: inf)
+```
+
+{{% note %}}
+Windowing requires a `_time` column which is why it's necessary to
+[recreate the `_time` column](#recreate-the-time-column) after an aggregation.
+{{% /note %}}
+
+###### Unwindowed output table
+```
+Table: keys: [_start, _stop, _field, _measurement]
+ _start:time _stop:time _field:string _measurement:string _time:time _value:float
+------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:51:00.000000000Z 65.88549613952637
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:52:00.000000000Z 65.50651391347249
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:53:00.000000000Z 65.30719598134358
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:54:00.000000000Z 64.39330975214641
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49386278788249
+2018-11-03T17:50:00.000000000Z 2018-11-03T17:55:00.000000000Z used_percent mem 2018-11-03T17:55:00.000000000Z 64.49816226959229
+```
+
+With the aggregate values in a single table, data points in the visualization are connected.
+
+
+
+## Summing up
+You have now created a Flux query that windows and aggregates data.
+The data transformation process outlined in this guide should be used for all aggregation operations.
+
+Flux also provides the [`aggregateWindow()` function](/v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow)
+which performs all these separate functions for you.
+
+The following Flux query will return the same results:
+
+###### aggregateWindow function
+```js
+dataSet
+ |> aggregateWindow(every: 1m, fn: mean)
+```
diff --git a/content/v2.0/reference/cli/_index.md b/content/v2.0/reference/cli/_index.md
index 5a38e0f18..9072b06a4 100644
--- a/content/v2.0/reference/cli/_index.md
+++ b/content/v2.0/reference/cli/_index.md
@@ -14,4 +14,4 @@ InfluxDB provides command line tools designed to aid in managing and working
with InfluxDB from the command line.
The following command line interfaces (CLIs) are available:
-[influx](/v2.0/reference/cli/influx)
+{{< children >}}
diff --git a/content/v2.0/reference/flux/_index.md b/content/v2.0/reference/flux/_index.md
new file mode 100644
index 000000000..d41c5aa37
--- /dev/null
+++ b/content/v2.0/reference/flux/_index.md
@@ -0,0 +1,13 @@
+---
+title: Flux query language
+description: Reference articles for Flux functions and the Flux language specification.
+menu:
+ v2_0_ref:
+ name: Flux query language
+ weight: 2
+---
+
+The following articles are meant as a reference for Flux functions and the
+Flux language specification.
+
+{{< children >}}
diff --git a/content/v2.0/reference/flux/functions/_index.md b/content/v2.0/reference/flux/functions/_index.md
new file mode 100644
index 000000000..2e26bcdb0
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/_index.md
@@ -0,0 +1,15 @@
+---
+title: Flux functions
+description: Flux functions allow you to retrieve, transform, process, and output data easily.
+menu:
+ v2_0_ref:
+ name: Flux functions
+ parent: Flux query language
+ weight: 4
+---
+
+Flux's functional syntax allows you to retrieve, transform, process, and output data easily.
+There is a large library of built-in functions, but you can also create your own
+custom functions to perform operations that suit your needs.
+
+{{< children >}}
diff --git a/content/v2.0/reference/flux/functions/inputs/_index.md b/content/v2.0/reference/flux/functions/inputs/_index.md
new file mode 100644
index 000000000..a12eb4d95
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/inputs/_index.md
@@ -0,0 +1,14 @@
+---
+title: Flux input functions
+description: Flux input functions define sources of data or display information about data sources.
+menu:
+ v2_0_ref:
+ parent: Flux functions
+ name: Inputs
+ weight: 1
+---
+
+Flux input functions define sources of data or display information about data sources.
+The following input functions are available:
+
+{{< function-list category="Inputs" menu="v2_0_ref" >}}
diff --git a/content/v2.0/reference/flux/functions/inputs/buckets.md b/content/v2.0/reference/flux/functions/inputs/buckets.md
new file mode 100644
index 000000000..5c2e180af
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/inputs/buckets.md
@@ -0,0 +1,22 @@
+---
+title: buckets() function
+description: The buckets() function returns a list of buckets in the organization.
+menu:
+ v2_0_ref:
+ name: buckets
+ parent: Inputs
+ weight: 1
+---
+
+The `buckets()` function returns a list of buckets in the organization.
+
+_**Function type:** Input_
+
+```js
+buckets()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SHOW DATABASES](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-databases)
diff --git a/content/v2.0/reference/flux/functions/inputs/from.md b/content/v2.0/reference/flux/functions/inputs/from.md
new file mode 100644
index 000000000..86ad34c7a
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/inputs/from.md
@@ -0,0 +1,50 @@
+---
+title: from() function
+description: The from() function retrieves data from an InfluxDB data source.
+menu:
+ v2_0_ref:
+ name: from
+ parent: Inputs
+ weight: 1
+---
+
+The `from()` function retrieves data from an InfluxDB data source.
+It returns a stream of tables from the specified [bucket](#parameters).
+Each unique series is contained within its own table.
+Each record in the table represents a single point in the series.
+
+_**Function type:** Input_
+_**Output data type:** Object_
+
+```js
+from(bucket: "telegraf/autogen")
+
+// OR
+
+from(bucketID: "0261d8287f4d6000")
+```
+
+## Parameters
+
+### bucket
+The name of the bucket to query.
+
+_**Data type:** String_
+
+### bucketID
+The string-encoded ID of the bucket to query.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+```
+```js
+from(bucketID: "0261d8287f4d6000")
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[FROM](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#from-clause)
diff --git a/content/v2.0/reference/flux/functions/inputs/fromcsv.md b/content/v2.0/reference/flux/functions/inputs/fromcsv.md
new file mode 100644
index 000000000..95e376020
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/inputs/fromcsv.md
@@ -0,0 +1,64 @@
+---
+title: fromCSV() function
+description: The fromCSV() function retrieves data from a CSV data source.
+menu:
+ v2_0_ref:
+ name: fromCSV
+ parent: Inputs
+ weight: 1
+---
+
+The `fromCSV()` function retrieves data from a comma-separated value (CSV) data source.
+It returns a stream of tables.
+Each unique series is contained within its own table.
+Each record in the table represents a single point in the series.
+
+_**Function type:** Input_
+_**Output data type:** Object_
+
+```js
+fromCSV(file: "/path/to/data-file.csv")
+
+// OR
+
+fromCSV(csv: csvData)
+```
+
+## Parameters
+
+### file
+The file path of the CSV file to query.
+The path can be absolute or relative.
+If relative, it is relative to the working directory of the `influxd` process.
+
+_**Data type:** String_
+
+### csv
+Raw CSV-formatted text.
+
+{{% note %}}
+CSV data must be in the CSV format produced by the Flux HTTP response standard.
+See the [Flux technical specification](https://github.com/influxdata/flux/blob/master/docs/SPEC.md#csv)
+for information about this format.
+{{% /note %}}
+
+_**Data type:** String_
+
+## Examples
+
+### Query CSV data from a file
+```js
+fromCSV(file: "/path/to/data-file.csv")
+```
+
+### Query raw CSV-formatted text
+```js
+csvData = "
+result,table,_start,_stop,_time,region,host,_value
+mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43
+mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25
+mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62
+"
+
+fromCSV(csv: csvData)
+```
diff --git a/content/v2.0/reference/flux/functions/misc/_index.md b/content/v2.0/reference/flux/functions/misc/_index.md
new file mode 100644
index 000000000..cf285784f
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/misc/_index.md
@@ -0,0 +1,15 @@
+---
+title: Flux miscellaneous functions
+description: Flux provides miscellaneous functions that serve purposes other than retrieving, transforming, or outputting data.
+menu:
+ v2_0_ref:
+ parent: Flux functions
+ name: Miscellaneous
+ weight: 5
+---
+
+Flux functions primarily retrieve, shape and transform, then output data; however,
+there are functions available that serve other purposes.
+The following functions are available but don't fit within other function categories:
+
+{{< function-list category="Miscellaneous" menu="v2_0_ref" >}}
diff --git a/content/v2.0/reference/flux/functions/misc/intervals.md b/content/v2.0/reference/flux/functions/misc/intervals.md
new file mode 100644
index 000000000..db6f039a0
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/misc/intervals.md
@@ -0,0 +1,154 @@
+---
+title: intervals() function
+description: The intervals() function generates a set of time intervals over a range of time.
+menu:
+ v2_0_ref:
+ name: intervals
+ parent: Miscellaneous
+ weight: 1
+---
+
+The `intervals()` function generates a set of time intervals over a range of time.
+
+An interval is an object with `start` and `stop` properties that correspond to the inclusive start and exclusive stop times of the time interval.
+The return value of intervals is another function that accepts start and stop time parameters and returns an interval generator.
+The generator is then used to produce the set of intervals.
+The set of intervals includes all intervals that intersect with the initial range of time.
+
+{{% note %}}
+The `intervals()` function is designed to be used with the intervals parameter of the [`window()` function](/v2.0/reference/flux/functions/transformations/window).
+{{% /note %}}
+
+_**Function type:** Miscellaneous_
+_**Output data type:** Object_
+
+```js
+intervals()
+```
+
+## Parameters
+
+### every
+The duration between starts of each of the intervals.
+The Nth interval start time is the initial start time plus the offset plus an Nth multiple of the every parameter.
+Defaults to the value of the `period` duration.
+
+_**Data type:** Duration_
+
+### period
+The length of each interval.
+Each interval's stop time is equal to the interval start time plus the period duration.
+It can be negative, indicating the start and stop boundaries are reversed.
+Defaults to the value of the `every` duration.
+
+_**Data type:** Duration_
+
+### offset
+The offset duration relative to the location offset.
+It can be negative, indicating that the offset goes backwards in time.
+Defaults to `0h`.
+
+_**Data type:** Duration_
+
+### filter
+A function that accepts an interval object and returns a boolean value.
+Each potential interval is passed to the filter function.
+When the function returns false, that interval is excluded from the set of intervals.
+Defaults to include all intervals.
+
+_**Data type:** Function_
+
+## Examples
+
+##### Basic intervals
+```js
+// 1 hour intervals
+intervals(every:1h)
+
+// 2 hour long intervals every 1 hour
+intervals(every:1h, period:2h)
+
+// 2 hour long intervals every 1 hour starting at 30m past the hour
+intervals(every:1h, period:2h, offset:30m)
+
+// 1 week intervals starting on Monday (by default weeks start on Sunday)
+intervals(every:1w, offset:1d)
+
+// the hour from 11PM - 12AM every night
+intervals(every:1d, period:-1h)
+
+// the last day of each month
+intervals(every:1mo, period:-1d)
+```
+
+##### Using a predicate
+```js
+// 1 day intervals excluding weekends
+intervals(
+ every:1d,
+ filter: (interval) => !(weekday(time: interval.start) in [Sunday, Saturday]),
+)
+
+// Work hours from 9AM - 5PM on work days.
+intervals(
+ every:1d,
+ period:8h,
+ offset:9h,
+ filter:(interval) => !(weekday(time: interval.start) in [Sunday, Saturday]),
+)
+```
+
+##### Using known start and stop dates
+```js
+// Every hour for six hours on Sep 5th.
+intervals(every:1h)(start:2018-09-05T00:00:00-07:00, stop: 2018-09-05T06:00:00-07:00)
+
+// Generates
+// [2018-09-05T00:00:00-07:00, 2018-09-05T01:00:00-07:00)
+// [2018-09-05T01:00:00-07:00, 2018-09-05T02:00:00-07:00)
+// [2018-09-05T02:00:00-07:00, 2018-09-05T03:00:00-07:00)
+// [2018-09-05T03:00:00-07:00, 2018-09-05T04:00:00-07:00)
+// [2018-09-05T04:00:00-07:00, 2018-09-05T05:00:00-07:00)
+// [2018-09-05T05:00:00-07:00, 2018-09-05T06:00:00-07:00)
+
+// Every hour for six hours with 1h30m periods on Sep 5th
+intervals(every:1h, period:1h30m)(start:2018-09-05T00:00:00-07:00, stop: 2018-09-05T06:00:00-07:00)
+
+// Generates
+// [2018-09-05T00:00:00-07:00, 2018-09-05T01:30:00-07:00)
+// [2018-09-05T01:00:00-07:00, 2018-09-05T02:30:00-07:00)
+// [2018-09-05T02:00:00-07:00, 2018-09-05T03:30:00-07:00)
+// [2018-09-05T03:00:00-07:00, 2018-09-05T04:30:00-07:00)
+// [2018-09-05T04:00:00-07:00, 2018-09-05T05:30:00-07:00)
+// [2018-09-05T05:00:00-07:00, 2018-09-05T06:30:00-07:00)
+
+// Every hour for six hours using the previous hour on Sep 5th
+intervals(every:1h, period:-1h)(start:2018-09-05T12:00:00-07:00, stop: 2018-09-05T18:00:00-07:00)
+
+// Generates
+// [2018-09-05T11:00:00-07:00, 2018-09-05T12:00:00-07:00)
+// [2018-09-05T12:00:00-07:00, 2018-09-05T13:00:00-07:00)
+// [2018-09-05T13:00:00-07:00, 2018-09-05T14:00:00-07:00)
+// [2018-09-05T14:00:00-07:00, 2018-09-05T15:00:00-07:00)
+// [2018-09-05T15:00:00-07:00, 2018-09-05T16:00:00-07:00)
+// [2018-09-05T16:00:00-07:00, 2018-09-05T17:00:00-07:00)
+// [2018-09-05T17:00:00-07:00, 2018-09-05T18:00:00-07:00)
+
+// Every month for 4 months starting on Jan 1st
+intervals(every:1mo)(start:2018-01-01, stop: 2018-05-01)
+
+// Generates
+// [2018-01-01, 2018-02-01)
+// [2018-02-01, 2018-03-01)
+// [2018-03-01, 2018-04-01)
+// [2018-04-01, 2018-05-01)
+
+// Every month for 4 months starting on Jan 15th
+intervals(every:1mo)(start:2018-01-15, stop: 2018-05-15)
+
+// Generates
+// [2018-01-15, 2018-02-15)
+// [2018-02-15, 2018-03-15)
+// [2018-03-15, 2018-04-15)
+// [2018-04-15, 2018-05-15)
+```
diff --git a/content/v2.0/reference/flux/functions/misc/linearbins.md b/content/v2.0/reference/flux/functions/misc/linearbins.md
new file mode 100644
index 000000000..7b8f22859
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/misc/linearbins.md
@@ -0,0 +1,51 @@
+---
+title: linearBins() function
+description: The linearBins() function generates a list of linearly separated floats.
+menu:
+ v2_0_ref:
+ name: linearBins
+ parent: Miscellaneous
+ weight: 1
+---
+
+The `linearBins()` function generates a list of linearly separated floats.
+It is a helper function meant to generate bin bounds for the
+[`histogram()` function](/v2.0/reference/flux/functions/transformations/histogram).
+
+_**Function type:** Miscellaneous_
+_**Output data type:** Array of floats_
+
+```js
+linearBins(start: 0.0, width: 5.0, count: 20, infinity: true)
+```
+
+## Parameters
+
+### start
+The first value in the returned list.
+
+_**Data type:** Float_
+
+### width
+The distance between subsequent bin values.
+
+_**Data type:** Float_
+
+### count
+The number of bins to create.
+
+_**Data type:** Integer_
+
+### infinity
+When `true`, adds an additional bin with a value of positive infinity.
+Defaults to `true`.
+
+_**Data type:** Boolean_
+
+## Examples
+
+```js
+linearBins(start: 0.0, width: 10.0, count: 10)
+
+// Generated list: [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, +Inf]
+```
diff --git a/content/v2.0/reference/flux/functions/misc/logarithmicbins.md b/content/v2.0/reference/flux/functions/misc/logarithmicbins.md
new file mode 100644
index 000000000..37ebb3034
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/misc/logarithmicbins.md
@@ -0,0 +1,50 @@
+---
+title: logarithmicBins() function
+description: The logarithmicBins() function generates a list of exponentially separated floats.
+menu:
+ v2_0_ref:
+ name: logarithmicBins
+ parent: Miscellaneous
+ weight: 1
+---
+
+The `logarithmicBins()` function generates a list of exponentially separated floats.
+It is a helper function meant to generate bin bounds for the
+[`histogram()` function](/v2.0/reference/flux/functions/transformations/histogram).
+
+_**Function type:** Miscellaneous_
+_**Output data type:** Array of floats_
+
+```js
+logarithmicBins(start:1.0, factor: 2.0, count: 10, infinity: true)
+```
+
+## Parameters
+
+### start
+The first value in the returned bin list.
+
+_**Data type:** Float_
+
+### factor
+The multiplier applied to each subsequent bin.
+
+_**Data type:** Float_
+
+### count
+The number of bins to create.
+
+_**Data type:** Integer_
+
+### infinity
+When `true`, adds an additional bin with a value of positive infinity.
+Defaults to `true`.
+
+_**Data type:** Boolean_
+
+## Examples
+```js
+logarithmicBins(start: 1.0, factor: 2.0, count: 10, infinity: true)
+
+// Generated list: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, +Inf]
+```
diff --git a/content/v2.0/reference/flux/functions/misc/systemtime.md b/content/v2.0/reference/flux/functions/misc/systemtime.md
new file mode 100644
index 000000000..bce87b251
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/misc/systemtime.md
@@ -0,0 +1,23 @@
+---
+title: systemTime() function
+description: The systemTime() function returns the current system time.
+menu:
+ v2_0_ref:
+ name: systemTime
+ parent: Miscellaneous
+ weight: 1
+---
+
+The `systemTime()` function returns the current system time.
+
+_**Function type:** Date/Time_
+_**Output data type:** Timestamp_
+
+```js
+systemTime()
+```
+
+## Examples
+```js
+offsetTime = (offset) => systemTime() |> shift(shift: offset)
+```
diff --git a/content/v2.0/reference/flux/functions/outputs/_index.md b/content/v2.0/reference/flux/functions/outputs/_index.md
new file mode 100644
index 000000000..96520cf14
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/outputs/_index.md
@@ -0,0 +1,14 @@
+---
+title: Flux output functions
+description: Flux output functions yield results or send data to a specified output destination.
+menu:
+ v2_0_ref:
+ parent: Flux functions
+ name: Outputs
+ weight: 2
+---
+
+Flux output functions yield results or send data to a specified output destination.
+The following output functions are available:
+
+{{< function-list category="Outputs" menu="v2_0_ref" >}}
diff --git a/content/v2.0/reference/flux/functions/outputs/to.md b/content/v2.0/reference/flux/functions/outputs/to.md
new file mode 100644
index 000000000..b4dc9be12
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/outputs/to.md
@@ -0,0 +1,158 @@
+---
+title: to() function
+description: The to() function writes data to an InfluxDB v2.0 bucket.
+menu:
+ v2_0_ref:
+ name: to
+ parent: Outputs
+ weight: 1
+---
+
+The `to()` function writes data to an **InfluxDB v2.0** bucket.
+
+_**Function type:** Output_
+_**Output data type:** Object_
+
+```js
+to(
+ bucket: "my-bucket",
+ org: "my-org",
+ host: "http://example.com:8086",
+ token: "xxxxxx",
+ timeColumn: "_time",
+ tagColumns: ["tag1", "tag2", "tag3"],
+ fieldFn: (r) => ({ [r._field]: r._value })
+)
+
+// OR
+
+to(
+ bucketID: "1234567890",
+ orgID: "0987654321",
+ host: "http://example.com:8086",
+ token: "xxxxxx",
+ timeColumn: "_time",
+ tagColumns: ["tag1", "tag2", "tag3"],
+ fieldFn: (r) => ({ [r._field]: r._value })
+)
+```
+
+## Parameters
+{{% note %}}
+`bucket` OR `bucketID` is **required**.
+{{% /note %}}
+
+### bucket
+The bucket to which data is written. Mutually exclusive with `bucketID`.
+
+_**Data type:** String_
+
+### bucketID
+The ID of the bucket to which data is written. Mutually exclusive with `bucket`.
+
+_**Data type:** String_
+
+### org
+The organization name of the specified [`bucket`](#bucket).
+Only required when writing to a remote host.
+Mutually exclusive with `orgID`.
+
+_**Data type:** String_
+
+{{% note %}}
+Specify either an `org` or an `orgID`, but not both.
+{{% /note %}}
+
+### orgID
+The organization ID of the specified [`bucket`](#bucket).
+Only required when writing to a remote host.
+Mutually exclusive with `org`.
+
+_**Data type:** String_
+
+### host
+The remote InfluxDB host to which to write.
+_If specified, a `token` is required._
+
+_**Data type:** String_
+
+### token
+The authorization token to use when writing to a remote host.
+_Required when a `host` is specified._
+
+_**Data type:** String_
+
+### timeColumn
+The time column of the output.
+Default is `"_time"`.
+
+_**Data type:** String_
+
+### tagColumns
+The tag columns of the output.
+Defaults to all columns with type `string`, excluding all value columns and the `_field` column if present.
+
+_**Data type:** Array of strings_
+
+### fieldFn
+Function that takes a record from the input table and returns an object.
+For each record from the input table, `fieldFn` returns an object that maps the output field key to the output value.
+Default is `(r) => ({ [r._field]: r._value })`.
+
+_**Data type:** Function_
+_**Output data type:** Object_
+
+## Examples
+
+### Default to() operation
+Given the following table:
+
+| _time | _start | _stop | _measurement | _field | _value |
+| ----- | ------ | ----- | ------------ | ------ | ------ |
+| 0005 | 0000 | 0009 | "a" | "temp" | 100.1 |
+| 0006 | 0000 | 0009 | "a" | "temp" | 99.3 |
+| 0007 | 0000 | 0009 | "a" | "temp" | 99.9 |
+
+The default `to` operation:
+
+```js
+// ...
+|> to(bucket:"my-bucket", org:"my-org")
+```
+
+is equivalent to writing the above data using the following line protocol:
+
+```
+_measurement=a temp=100.1 0005
+_measurement=a temp=99.3 0006
+_measurement=a temp=99.9 0007
+```
+
+### Custom to() operation
+The `to()` function's default operation can be overridden. For example, given the following table:
+
+| _time | _start | _stop | tag1 | tag2 | hum | temp |
+| ----- | ------ | ----- | ---- | ---- | ---- | ----- |
+| 0005 | 0000 | 0009 | "a" | "b" | 55.3 | 100.1 |
+| 0006 | 0000 | 0009 | "a" | "b" | 55.4 | 99.3 |
+| 0007 | 0000 | 0009 | "a" | "b" | 55.5 | 99.9 |
+
+The operation:
+
+```js
+// ...
+|> to(bucket:"my-bucket", org:"my-org", tagColumns:["tag1"], fieldFn: (r) => ({"hum": r.hum, "temp": r.temp}))
+```
+
+is equivalent to writing the above data using the following line protocol:
+
+```
+_tag1=a hum=55.3,temp=100.1 0005
+_tag1=a hum=55.4,temp=99.3 0006
+_tag1=a hum=55.5,temp=99.9 0007
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SELECT INTO](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-into-clause)
diff --git a/content/v2.0/reference/flux/functions/outputs/yield.md b/content/v2.0/reference/flux/functions/outputs/yield.md
new file mode 100644
index 000000000..d9882c3d2
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/outputs/yield.md
@@ -0,0 +1,45 @@
+---
+title: yield() function
+description: The yield() function indicates the input tables received should be delivered as a result of the query.
+menu:
+ v2_0_ref:
+ name: yield
+ parent: Outputs
+ weight: 1
+---
+
+The `yield()` function indicates the input tables received should be delivered as a result of the query.
+Yield outputs the input stream unmodified.
+A query may have multiple results, each identified by the name provided to the `yield()` function.
+
+_**Function type:** Output_
+_**Output data type:** Object_
+
+```js
+yield(name: "custom-name")
+```
+
+{{% note %}}
+`yield()` is implicit for queries that do only one thing and is only needed when using multiple sources in a query.
+With multiple sources, `yield()` is required to specify what is returned, and what name to give it.
+{{% /note %}}
+
+## Parameters
+
+### name
+A unique name for the yielded results.
+Defaults to `"_results"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> yield(name: "1")
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SELECT AS](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-basic-select-statement)
diff --git a/content/v2.0/reference/flux/functions/tests/_index.md b/content/v2.0/reference/flux/functions/tests/_index.md
new file mode 100644
index 000000000..1850a247b
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/tests/_index.md
@@ -0,0 +1,14 @@
+---
+title: Flux testing functions
+description: Flux testing functions test piped-forward data in specific ways and return errors if the tests fail.
+menu:
+ v2_0_ref:
+ name: Tests
+ parent: Flux functions
+ weight: 5
+---
+
+Flux testing functions test piped-forward data in specific ways and return errors if the tests fail.
+The following testing functions are available:
+
+{{< function-list category="Tests" menu="v2_0_ref" >}}
diff --git a/content/v2.0/reference/flux/functions/tests/assertequals.md b/content/v2.0/reference/flux/functions/tests/assertequals.md
new file mode 100644
index 000000000..96c5d0387
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/tests/assertequals.md
@@ -0,0 +1,67 @@
+---
+title: assertEquals() function
+description: The assertEquals() function tests whether two streams have identical data.
+menu:
+ v2_0_ref:
+ name: assertEquals
+ parent: Tests
+ weight: 1
+---
+
+The `assertEquals()` function tests whether two streams have identical data.
+If equal, the function outputs the tested data stream unchanged.
+If unequal, the function outputs an error.
+
+_**Function type:** Test_
+
+```js
+assertEquals(
+ name: "streamEquality",
+ got: got,
+ want: want
+)
+```
+
+_The `assertEquals()` function can be used to perform in-line tests in a query._
+
+## Parameters
+
+### name
+Unique name given to the assertion.
+
+_**Data type:** String_
+
+### got
+The stream containing data to test.
+Defaults to data piped-forward from another function (`<-`).
+
+_**Data type:** Object_
+
+### want
+The stream that contains the expected data to test against.
+
+_**Data type:** Object_
+
+
+## Examples
+
+##### Assert on separate streams
+```js
+want = from(bucket: "backup-telegraf/autogen")
+ |> range(start: -5m)
+
+got = from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+
+assertEquals(got: got, want: want)
+```
+
+##### Inline assertion
+```js
+want = from(bucket: "backup-telegraf/autogen")
+ |> range(start: -5m)
+
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> assertEquals(want: want)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/_index.md b/content/v2.0/reference/flux/functions/transformations/_index.md
new file mode 100644
index 000000000..dd3996c5b
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/_index.md
@@ -0,0 +1,27 @@
+---
+title: Flux transformation functions
+description: Flux transformation functions transform and shape your data in specific ways.
+menu:
+ v2_0_ref:
+ parent: Flux functions
+ name: Transformations
+ weight: 3
+---
+
+Flux transformation functions transform or shape your data in specific ways.
+There are different types of transformations categorized below:
+
+## [Aggregates](/v2.0/reference/flux/functions/transformations/aggregates)
+Aggregate functions take values from an input table and aggregate them in some way.
+The output table contains a single row with the aggregated value.
+
+## [Selectors](/v2.0/reference/flux/functions/transformations/selectors)
+Selector functions return one or more records based on function logic.
+The output table is different than the input table, but individual row values are not.
+
+## [Type conversions](/v2.0/reference/flux/functions/transformations/type-conversions)
+Type conversion functions convert the `_value` column of the input table into a specific data type.
+
+## Generic transformations
+
+{{< function-list category="Transformations" menu="v2_0_ref" >}}
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/_index.md b/content/v2.0/reference/flux/functions/transformations/aggregates/_index.md
new file mode 100644
index 000000000..1bde2bc48
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/_index.md
@@ -0,0 +1,47 @@
+---
+title: Flux aggregate functions
+description: Flux aggregate functions take values from an input table and aggregate them in some way.
+menu:
+ v2_0_ref:
+ parent: Transformations
+ name: Aggregates
+ weight: 1
+---
+
+Flux aggregate functions take values from an input table and aggregate them in some way.
+The output table contains a single row with the aggregated value.
+
+Aggregate operations output a table for every input table they receive.
+A list of columns to aggregate must be provided to the operation.
+The aggregate function is applied to each column in isolation.
+Any output table will have the following properties:
+
+- It always contains a single record.
+- It will have the same group key as the input table.
+- It will contain a column for each provided aggregate column.
+ The column label will be the same as the input table.
+ The type of the column depends on the specific aggregate operation.
+ The value of the column will be `null` if the input table is empty or the input column has only `null` values.
+- It will not have a `_time` column.
+
+### aggregateWindow helper function
+The [`aggregateWindow()` function](/v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow)
+does most of the work needed when aggregating data.
+It windows and aggregates the data, then combines windowed tables into a single output table.
+
+### Aggregate functions
+The following aggregate functions are available:
+
+{{< function-list category="Aggregates" menu="v2_0_ref" >}}
+
+### Aggregate selectors
+The following functions are both aggregates and selectors.
+Each returns `n` values after performing an aggregate operation.
+They are categorized as selector functions in this documentation:
+
+- [highestAverage](/v2.0/reference/flux/functions/transformations/selectors/highestaverage)
+- [highestCurrent](/v2.0/reference/flux/functions/transformations/selectors/highestcurrent)
+- [highestMax](/v2.0/reference/flux/functions/transformations/selectors/highestmax)
+- [lowestAverage](/v2.0/reference/flux/functions/transformations/selectors/lowestaverage)
+- [lowestCurrent](/v2.0/reference/flux/functions/transformations/selectors/lowestcurrent)
+- [lowestMin](/v2.0/reference/flux/functions/transformations/selectors/lowestmin)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow.md b/content/v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow.md
new file mode 100644
index 000000000..840beb6b1
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/aggregatewindow.md
@@ -0,0 +1,112 @@
+---
+title: aggregateWindow() function
+description: The aggregateWindow() function applies an aggregate function to fixed windows of time.
+menu:
+ v2_0_ref:
+ name: aggregateWindow
+ parent: Aggregates
+ weight: 1
+---
+
+The `aggregateWindow()` function applies an aggregate function to fixed windows of time.
+
+_**Function type:** Aggregate_
+
+```js
+aggregateWindow(
+ every: 1m,
+ fn: mean,
+ columns: ["_value"],
+ timeColumn: "_stop",
+ timeDst: "_time",
+ createEmpty: true
+)
+```
+
+As data is windowed into separate tables and aggregated, the `_time` column is dropped from each group key.
+This helper copies the timestamp from a remaining column into the `_time` column.
+View the [function definition](#function-definition).
+
+## Parameters
+
+### every
+The duration of windows.
+
+_**Data type:** Duration_
+
+### fn
+The aggregate function used in the operation.
+
+_**Data type:** Function_
+
+### columns
+List of columns on which to operate.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### timeColumn
+The time column from which time is copied for the aggregate record.
+Defaults to `"_stop"`.
+
+_**Data type:** String_
+
+### timeDst
+The "time destination" column to which time is copied for the aggregate record.
+Defaults to `"_time"`.
+
+_**Data type:** String_
+
+### createEmpty
+For windows without data, this will create an empty window and fill
+it with a `null` aggregate value.
+Defaults to `true`.
+
+_**Data type:** Boolean_
+
+## Examples
+
+###### Using an aggregate function with default parameters
+```js
+from(bucket: "telegraf/autogen")
+  |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent")
+ |> aggregateWindow(
+ every: 5m,
+ fn: mean
+ )
+```
+###### Specifying parameters of the aggregate function
+To use `aggregateWindow()` aggregate functions that don't provide defaults for required parameters,
+for the `fn` parameter, define an anonymous function with `columns` and `tables` parameters
+that pipe-forwards tables into the aggregate function with all required parameters defined:
+
+```js
+from(bucket: "telegraf/autogen")
+  |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent")
+ |> aggregateWindow(
+ every: 5m,
+ fn: (columns, tables=<-) => tables |> percentile(percentile: 0.99, columns:columns)
+ )
+```
+
+## Function definition
+```js
+aggregateWindow = (every, fn, columns=["_value"], timeColumn="_stop", timeDst="_time", tables=<-) =>
+ tables
+ |> window(every:every)
+ |> fn(columns:columns)
+ |> duplicate(column:timeColumn, as:timeDst)
+ |> window(every:inf, timeColumn:timeDst)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[InfluxQL aggregate functions](https://docs.influxdata.com/influxdb/latest/query_language/functions/#aggregations)
+[GROUP BY time()](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-group-by-clause)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/count.md b/content/v2.0/reference/flux/functions/transformations/aggregates/count.md
new file mode 100644
index 000000000..05887f925
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/count.md
@@ -0,0 +1,45 @@
+---
+title: count() function
+description: The count() function outputs the number of records in each aggregated column.
+menu:
+ v2_0_ref:
+ name: count
+ parent: Aggregates
+ weight: 1
+---
+
+The `count()` function outputs the number of records in each aggregated column.
+It counts both null and non-null records.
+
+_**Function type:** Aggregate_
+_**Output data type:** Integer_
+
+```js
+count(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+A list of columns on which to operate.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> count()
+```
+
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> count(columns: ["_value"])
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[COUNT()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#count)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/cov.md b/content/v2.0/reference/flux/functions/transformations/aggregates/cov.md
new file mode 100644
index 000000000..064e86adf
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/cov.md
@@ -0,0 +1,67 @@
+---
+title: cov() function
+description: The cov() function computes the covariance between two streams by first joining the streams, then performing the covariance operation.
+menu:
+ v2_0_ref:
+ name: cov
+ parent: Aggregates
+ weight: 1
+---
+
+The `cov()` function computes the covariance between two streams by first joining the streams,
+then performing the covariance operation.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+cov(x: table1, y: table2, on: ["_time", "_field"], pearsonr: false)
+```
+
+## Parameters
+
+### x
+One input stream used to calculate the covariance.
+
+_**Data type:** Object_
+
+### y
+The other input table used to calculate the covariance.
+
+_**Data type:** Object_
+
+### on
+The list of columns on which to join.
+
+_**Data type:** Array of strings_
+
+### pearsonr
+Indicates whether the result should be normalized to be the Pearson R coefficient.
+
+_**Data type:** Boolean_
+
+
+## Examples
+
+```js
+table1 = from(bucket: "telegraf/autogen")
+ |> range(start: -15m)
+ |> filter(fn: (r) =>
+ r._measurement == "measurement_1"
+ )
+
+table2 = from(bucket: "telegraf/autogen")
+ |> range(start: -15m)
+ |> filter(fn: (r) =>
+ r._measurement == "measurement_2"
+ )
+
+cov(x: table1, y: table2, on: ["_time", "_field"])
+```
+
+## Function definition
+```js
+cov = (x,y,on,pearsonr=false) =>
+ join( tables:{x:x, y:y}, on:on )
+ |> covariance(pearsonr:pearsonr, columns:["_value_x","_value_y"])
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/covariance.md b/content/v2.0/reference/flux/functions/transformations/aggregates/covariance.md
new file mode 100644
index 000000000..18124f6ba
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/covariance.md
@@ -0,0 +1,46 @@
+---
+title: covariance() function
+description: The covariance() function computes the covariance between two columns.
+menu:
+ v2_0_ref:
+ name: covariance
+ parent: Aggregates
+ weight: 1
+---
+
+The `covariance()` function computes the covariance between two columns.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+covariance(columns: ["column_x", "column_y"], pearsonr: false, valueDst: "_value")
+```
+
+## Parameters
+
+### columns
+A list of columns on which to operate.
+
+_**Data type:** Array of strings_
+
+{{% note %}}
+Exactly two columns must be provided to the `columns` property.
+{{% /note %}}
+
+### pearsonr
+Indicates whether the result should be normalized to be the Pearson R coefficient.
+
+_**Data type:** Boolean_
+
+### valueDst
+The column into which the result will be placed. Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start:-5m)
+ |> covariance(columns: ["x", "y"])
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/derivative.md b/content/v2.0/reference/flux/functions/transformations/aggregates/derivative.md
new file mode 100644
index 000000000..b7be0572d
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/derivative.md
@@ -0,0 +1,63 @@
+---
+title: derivative() function
+description: The derivative() function computes the rate of change per unit of time between subsequent non-null records.
+menu:
+ v2_0_ref:
+ name: derivative
+ parent: Aggregates
+ weight: 1
+---
+
+The `derivative()` function computes the rate of change per [`unit`](#unit) of time between subsequent non-null records.
+It assumes rows are ordered by the `_time` column.
+The output table schema will be the same as the input table.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+derivative(
+ unit: 1s,
+ nonNegative: false,
+ columns: ["_value"],
+ timeSrc: "_time"
+)
+```
+
+## Parameters
+
+### unit
+The time duration used when creating the derivative.
+Defaults to `1s`.
+
+_**Data type:** Duration_
+
+### nonNegative
+Indicates if the derivative is allowed to be negative.
+When set to `true`, if a value is less than the previous value, it is assumed the previous value should have been a zero.
+
+_**Data type:** Boolean_
+
+### columns
+A list of columns on which to compute the derivative.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### timeSrc
+The column containing time values.
+Defaults to `"_time"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> derivative(unit: 1s, nonNegative: true)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[DERIVATIVE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#derivative)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/difference.md b/content/v2.0/reference/flux/functions/transformations/aggregates/difference.md
new file mode 100644
index 000000000..bfd2b9bd2
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/difference.md
@@ -0,0 +1,94 @@
+---
+title: difference() function
+description: The difference() function computes the difference between subsequent non-null records.
+menu:
+ v2_0_ref:
+ name: difference
+ parent: Aggregates
+ weight: 1
+---
+
+The `difference()` function computes the difference between subsequent records.
+Every user-specified column of numeric type is subtracted while others are kept intact.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+difference(nonNegative: false, columns: ["_value"])
+```
+
+## Parameters
+
+### nonNegative
+Indicates if the difference is allowed to be negative.
+When set to `true`, if a value is less than the previous value, it is assumed the previous value should have been a zero.
+
+_**Data type:** Boolean_
+
+### columns
+A list of columns on which to compute the difference.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Subtraction rules for numeric types
+- The difference between two non-null values is their algebraic difference;
+ or `null`, if the result is negative and `nonNegative: true`;
+- `null` minus some value is always `null`;
+- Some value `v` minus `null` is `v` minus the last non-null value seen before `v`;
+ or `null` if `v` is the first non-null value seen.
+
+
+## Examples
+
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> difference()
+```
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> difference(nonNegative: true)
+```
+
+### Example data transformation
+
+###### Input table
+| _time | A | B | C | tag |
+|:-----:|:----:|:----:|:----:|:---:|
+| 0001 | null | 1 | 2 | tv |
+| 0002 | 6 | 2 | null | tv |
+| 0003 | 4 | 2 | 4 | tv |
+| 0004 | 10 | 10 | 2 | tv |
+| 0005 | null | null | 1 | tv |
+
+#### With nonNegative set to false
+```js
+|> difference(nonNegative: false)
+```
+###### Output table
+| _time | A | B | C | tag |
+|:-----:|:----:|:----:|:----:|:---:|
+| 0002 | null | 1 | null | tv |
+| 0003 | -2 | 0 | 2 | tv |
+| 0004 | 6 | 8 | -2 | tv |
+| 0005 | null | null | -1 | tv |
+
+#### With nonNegative set to true
+```js
+|> difference(nonNegative: true)
+```
+###### Output table
+| _time | A | B | C | tag |
+|:-----:|:----:|:----:|:----:|:---:|
+| 0002 | null | 1 | null | tv |
+| 0003 | null | 0 | 2 | tv |
+| 0004 | 6 | 8 | null | tv |
+| 0005 | null | null | null | tv |
+
+
+
+##### Related InfluxQL functions and statements:
+[DIFFERENCE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#difference)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/histogramquantile.md b/content/v2.0/reference/flux/functions/transformations/aggregates/histogramquantile.md
new file mode 100644
index 000000000..8f38af6d5
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/histogramquantile.md
@@ -0,0 +1,83 @@
+---
+title: histogramQuantile() function
+description: The `histogramQuantile()` function approximates a quantile given a histogram that approximates the cumulative distribution of the dataset.
+menu:
+ v2_0_ref:
+ name: histogramQuantile
+ parent: Aggregates
+ weight: 1
+---
+
+The `histogramQuantile()` function approximates a quantile given a histogram that
+approximates the cumulative distribution of the dataset.
+Each input table represents a single histogram.
+The histogram tables must have two columns – a count column and an upper bound column.
+
+The count is the number of values that are less than or equal to the upper bound value.
+The table can have any number of records, each representing an entry in the histogram.
+The counts must be monotonically increasing when sorted by upper bound.
+If any values in the count column or upper bound column are `null`, it returns an error.
+
+Linear interpolation between the two closest bounds is used to compute the quantile.
+If either of the bounds used in interpolation is infinite,
+then the other finite bound is used and no interpolation is performed.
+
+The output table has the same group key as the input table.
+Columns not part of the group key are removed and a single value column of type float is added.
+The count and upper bound columns must not be part of the group key.
+The value column represents the value of the desired quantile from the histogram.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+histogramQuantile(quantile: 0.5, countColumn: "_value", upperBoundColumn: "le", valueColumn: "_value", minValue: 0)
+```
+
+## Parameters
+
+### quantile
+A value between 0 and 1 indicating the desired quantile to compute.
+
+_**Data type:** Float_
+
+### countColumn
+The name of the column containing the histogram counts.
+The count column type must be float.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+### upperBoundColumn
+The name of the column containing the histogram upper bounds.
+The upper bound column type must be float.
+Defaults to `"le"`.
+
+_**Data type:** String_
+
+### valueColumn
+The name of the output column which will contain the computed quantile.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+### minValue
+The assumed minimum value of the dataset.
+When the quantile falls below the lowest upper bound, interpolation is performed between `minValue` and the lowest upper bound.
+When `minValue` is equal to negative infinity, the lowest upper bound is used.
+Defaults to `0`.
+
+_**Data type:** Float_
+
+{{% note %}}
+When the quantile falls below the lowest upper bound,
+interpolation is performed between `minValue` and the lowest upper bound.
+When `minValue` is equal to negative infinity, the lowest upper bound is used.
+{{% /note %}}
+
+## Examples
+
+##### Compute the 90th quantile
+```js
+histogramQuantile(quantile: 0.9)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/increase.md b/content/v2.0/reference/flux/functions/transformations/aggregates/increase.md
new file mode 100644
index 000000000..607790c9e
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/increase.md
@@ -0,0 +1,66 @@
+---
+title: increase() function
+description: The increase() function calculates the total non-negative difference between values in a table.
+menu:
+ v2_0_ref:
+ name: increase
+ parent: Aggregates
+ weight: 1
+---
+
+The `increase()` function calculates the total non-negative difference between values in a table.
+A main use case is tracking changes in counter values which may wrap over time
+when they hit a threshold or are reset.
+In the case of a wrap/reset, we can assume that the absolute delta between two
+points will be at least their non-negative difference.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+increase(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+The list of columns for which the increase is calculated.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -24h)
+ |> filter(fn: (r) =>
+ r._measurement == "system" and
+ r._field == "n_users"
+ )
+ |> increase()
+```
+
+Given the following input table:
+
+| _time | _value |
+| ----- | ------ |
+| 00001 | 1 |
+| 00002 | 5 |
+| 00003 | 3 |
+| 00004 | 4 |
+
+`increase()` produces the following table:
+
+| _time | _value |
+| ----- | ------ |
+| 00002 | 4 |
+| 00003 | 7 |
+| 00004 | 8 |
+
+## Function definition
+```js
+increase = (tables=<-, columns=["_value"]) =>
+ tables
+ |> difference(nonNegative: true, columns:columns)
+ |> cumulativeSum()
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/integral.md b/content/v2.0/reference/flux/functions/transformations/aggregates/integral.md
new file mode 100644
index 000000000..3bc7206ab
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/integral.md
@@ -0,0 +1,48 @@
+---
+title: integral() function
+description: The integral() function computes the area under the curve per unit of time of subsequent non-null records.
+menu:
+ v2_0_ref:
+ name: integral
+ parent: Aggregates
+ weight: 1
+---
+
+The `integral()` function computes the area under the curve per [`unit`](#unit) of time of subsequent non-null records.
+The curve is defined using `_time` as the domain and record values as the range.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+integral(unit: 10s, columns: ["_value"])
+```
+
+## Parameters
+
+### unit
+The time duration used when computing the integral.
+
+_**Data type:** Duration_
+
+### columns
+A list of columns on which to operate.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> integral(unit:10s)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[INTEGRAL()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#integral)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/mean.md b/content/v2.0/reference/flux/functions/transformations/aggregates/mean.md
new file mode 100644
index 000000000..6eb3691a3
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/mean.md
@@ -0,0 +1,42 @@
+---
+title: mean() function
+description: The mean() function computes the mean or average of non-null records in the input table.
+menu:
+ v2_0_ref:
+ name: mean
+ parent: Aggregates
+ weight: 1
+---
+
+The `mean()` function computes the mean or average of non-null records in the input table.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+mean(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+A list of columns on which to compute the mean.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent")
+ |> range(start:-12h)
+ |> window(every:10m)
+ |> mean()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[MEAN()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#mean)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/median.md b/content/v2.0/reference/flux/functions/transformations/aggregates/median.md
new file mode 100644
index 000000000..ffc15ceca
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/median.md
@@ -0,0 +1,97 @@
+---
+title: median() function
+description: The `median()` function returns the median `_value` of an input table or all non-null records in the input table with values that fall within the 50th percentile.
+menu:
+ v2_0_ref:
+ name: median
+ parent: Aggregates
+ weight: 1
+---
+
+The `median()` function is a special application of the [`percentile()` function](/v2.0/reference/flux/functions/transformations/aggregates/percentile)
+that returns the median `_value` of an input table or all non-null records in the input table
+with values that fall within the 50th percentile depending on the [method](#method) used.
+
+_**Function type:** Selector or Aggregate_
+_**Output data type:** Object_
+
+
+```js
+median(method: "estimate_tdigest", compression: 0.0)
+```
+
+When using the `estimate_tdigest` or `exact_mean` methods, it outputs non-null
+records with values that fall within the 50th percentile.
+
+When using the `exact_selector` method, it outputs the non-null record with the
+value that represents the 50th percentile.
+
+{{% note %}}
+The `median()` function can only be used with float value types.
+It is a special application of the [`percentile()` function](/v2.0/reference/flux/functions/transformations/aggregates/percentile) which
+uses an approximation implementation that requires floats.
+You can convert your value column to a float column using the [`toFloat()` function](/v2.0/reference/flux/functions/transformations/type-conversions/tofloat).
+{{% /note %}}
+
+## Parameters
+
+### method
+Defines the method of computation. Defaults to `"estimate_tdigest"`.
+
+_**Data type:** String_
+
+The available options are:
+
+##### estimate_tdigest
+An aggregate method that uses a [t-digest data structure](https://github.com/tdunning/t-digest)
+to compute an accurate percentile estimate on large data sources.
+
+##### exact_mean
+An aggregate method that takes the average of the two points closest to the percentile value.
+
+##### exact_selector
+A selector method that returns the data point for which at least percentile points are less than.
+
+### compression
+Indicates how many centroids to use when compressing the dataset.
+A larger number produces a more accurate result at the cost of increased memory requirements.
+Defaults to 1000.
+
+_**Data type:** Float_
+
+## Examples
+
+###### Median as an aggregate
+```js
+from(bucket: "telegraf/autogen")
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> range(start:-12h)
+ |> window(every:10m)
+ |> median()
+```
+
+###### Median as a selector
+```js
+from(bucket: "telegraf/autogen")
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> range(start:-12h)
+ |> window(every:10m)
+ |> median(method: "exact_selector")
+```
+
+## Function definition
+```js
+median = (method="estimate_tdigest", compression=0.0, tables=<-) =>
+  tables
+    |> percentile(percentile:0.5, method:method, compression:compression)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[MEDIAN()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#median)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/pearsonr.md b/content/v2.0/reference/flux/functions/transformations/aggregates/pearsonr.md
new file mode 100644
index 000000000..e927d2dea
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/pearsonr.md
@@ -0,0 +1,61 @@
+---
+title: pearsonr() function
+description: The pearsonr() function computes the Pearson R correlation coefficient between two streams by first joining the streams, then performing the covariance operation normalized to compute R.
+menu:
+ v2_0_ref:
+ name: pearsonr
+ parent: Aggregates
+ weight: 1
+---
+
+The `pearsonr()` function computes the Pearson R correlation coefficient between two streams
+by first joining the streams, then performing the covariance operation normalized to compute R.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+pearsonr(x: stream1, y: stream2, on: ["_time", "_field"])
+```
+
+## Parameters
+
+### x
+First input stream used in the operation.
+
+_**Data type:** Object_
+
+### y
+Second input stream used in the operation.
+
+_**Data type:** Object_
+
+### on
+The list of columns on which to join.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+stream1 = from(bucket:"telegraf/autogen")
+ |> range(start:-15m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+
+stream2 = from(bucket:"telegraf/autogen")
+ |> range(start:-15m)
+  |> filter(fn: (r) =>
+    r._measurement == "mem" and
+    r._field == "available"
+  )
+
+pearsonr(x: stream1, y: stream2, on: ["_time", "_field"])
+```
+
+## Function definition
+```js
+pearsonr = (x,y,on) =>
+ cov(x:x, y:y, on:on, pearsonr:true)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/percentile.md b/content/v2.0/reference/flux/functions/transformations/aggregates/percentile.md
new file mode 100644
index 000000000..c88250fe5
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/percentile.md
@@ -0,0 +1,97 @@
+---
+title: percentile() function
+description: The percentile() function outputs non-null records with values that fall within the specified percentile or the non-null record with the value that represents the specified percentile.
+menu:
+ v2_0_ref:
+ name: percentile
+ parent: Aggregates
+ weight: 1
+---
+
+The `percentile()` function returns records from an input table with `_value`s that fall within
+a specified percentile or it returns the record with the `_value` that represents the specified percentile.
+Which it returns depends on the [method](#method) used.
+
+_**Function type:** Aggregate or Selector_
+_**Output data type:** Float or Object_
+
+```js
+percentile(columns: ["_value"], percentile: 0.99, method: "estimate_tdigest", compression: 1000)
+```
+
+When using the `estimate_tdigest` or `exact_mean` methods, it outputs non-null
+records with values that fall within the specified percentile.
+
+When using the `exact_selector` method, it outputs the non-null record with the
+value that represents the specified percentile.
+
+## Parameters
+
+### columns
+A list of columns on which to compute the percentile.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### percentile
+A value between 0 and 1 indicating the desired percentile.
+
+_**Data type:** Float_
+
+### method
+Defines the method of computation.
+
+_**Data type:** String_
+
+The available options are:
+
+##### estimate_tdigest
+An aggregate method that uses a [t-digest data structure](https://github.com/tdunning/t-digest)
+to compute an accurate percentile estimate on large data sources.
+
+##### exact_mean
+An aggregate method that takes the average of the two points closest to the percentile value.
+
+##### exact_selector
+A selector method that returns the data point for which at least percentile points are less than.
+
+### compression
+Indicates how many centroids to use when compressing the dataset.
+A larger number produces a more accurate result at the cost of increased memory requirements.
+Defaults to 1000.
+
+_**Data type:** Float_
+
+## Examples
+
+###### Percentile as an aggregate
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system")
+ |> percentile(
+ percentile: 0.99,
+ method: "estimate_tdigest",
+ compression: 1000
+ )
+```
+
+###### Percentile as a selector
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system")
+ |> percentile(
+ percentile: 0.99,
+ method: "exact_selector"
+ )
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[PERCENTILE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#percentile)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/skew.md b/content/v2.0/reference/flux/functions/transformations/aggregates/skew.md
new file mode 100644
index 000000000..f92ad9b19
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/skew.md
@@ -0,0 +1,36 @@
+---
+title: skew() function
+description: The skew() function outputs the skew of non-null records as a float.
+menu:
+ v2_0_ref:
+ name: skew
+ parent: Aggregates
+ weight: 1
+---
+
+The `skew()` function outputs the skew of non-null records as a float.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+skew(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+Specifies a list of columns on which to operate. Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> skew()
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/spread.md b/content/v2.0/reference/flux/functions/transformations/aggregates/spread.md
new file mode 100644
index 000000000..4948198c6
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/spread.md
@@ -0,0 +1,46 @@
+---
+title: spread() function
+description: The spread() function outputs the difference between the minimum and maximum values in each specified column.
+menu:
+ v2_0_ref:
+ name: spread
+ parent: Aggregates
+ weight: 1
+---
+
+The `spread()` function outputs the difference between the minimum and maximum values in each specified column.
+Only `uint`, `int`, and `float` column types can be used.
+The type of the output column depends on the type of input column:
+
+- For input columns with type `uint` or `int`, the output is an `int`
+- For input columns with type `float`, the output is a `float`.
+
+_**Function type:** Aggregate_
+_**Output data type:** Integer or Float (inherited from input column type)_
+
+```js
+spread(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+Specifies a list of columns on which to operate. Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> spread()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SPREAD()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#spread)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/stddev.md b/content/v2.0/reference/flux/functions/transformations/aggregates/stddev.md
new file mode 100644
index 000000000..4bccc283c
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/stddev.md
@@ -0,0 +1,42 @@
+---
+title: stddev() function
+description: The stddev() function computes the standard deviation of non-null records in specified columns.
+menu:
+ v2_0_ref:
+ name: stddev
+ parent: Aggregates
+ weight: 1
+---
+
+The `stddev()` function computes the standard deviation of non-null records in specified columns.
+
+_**Function type:** Aggregate_
+_**Output data type:** Float_
+
+```js
+stddev(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+Specifies a list of columns on which to operate.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> stddev()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[STDDEV()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#stddev)
diff --git a/content/v2.0/reference/flux/functions/transformations/aggregates/sum.md b/content/v2.0/reference/flux/functions/transformations/aggregates/sum.md
new file mode 100644
index 000000000..3338bd9f8
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/aggregates/sum.md
@@ -0,0 +1,42 @@
+---
+title: sum() function
+description: The sum() function computes the sum of non-null records in specified columns.
+menu:
+ v2_0_ref:
+ name: sum
+ parent: Aggregates
+ weight: 1
+---
+
+The `sum()` function computes the sum of non-null records in specified columns.
+
+_**Function type:** Aggregate_
+_**Output data type:** Integer, UInteger, or Float (inherited from column type)_
+
+```js
+sum(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+Specifies a list of columns on which to operate.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> sum()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SUM()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#sum)
diff --git a/content/v2.0/reference/flux/functions/transformations/columns.md b/content/v2.0/reference/flux/functions/transformations/columns.md
new file mode 100644
index 000000000..e51ddece6
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/columns.md
@@ -0,0 +1,57 @@
+---
+title: columns() function
+description: >
+ The columns() function lists the column labels of input tables.
+ For each input table, it outputs a table with the same group key columns,
+ plus a new column containing the labels of the input table's columns.
+menu:
+ v2_0_ref:
+ name: columns
+ parent: Transformations
+ weight: 1
+---
+
+The `columns()` function lists the column labels of input tables.
+For each input table, it outputs a table with the same group key columns,
+plus a new column containing the labels of the input table's columns.
+Each row in an output table contains the group key value and the label of one column of the input table.
+Each output table has the same number of rows as the number of columns of the input table.
+
+_**Function type:** Transformation_
+
+```js
+columns(column: "_value")
+```
+
+## Parameters
+
+### column
+The name of the output column in which to store the column labels.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> columns(column: "labels")
+```
+
+##### Get every possible column label in a single table
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> columns()
+ |> keep(columns: ["_value"])
+ |> group()
+ |> distinct()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SHOW MEASUREMENTS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-measurements)
+[SHOW FIELD KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-field-keys)
+[SHOW TAG KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-tag-keys)
+[SHOW SERIES](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-series)
diff --git a/content/v2.0/reference/flux/functions/transformations/cumulativesum.md b/content/v2.0/reference/flux/functions/transformations/cumulativesum.md
new file mode 100644
index 000000000..a44bc72a1
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/cumulativesum.md
@@ -0,0 +1,43 @@
+---
+title: cumulativeSum() function
+description: The cumulativeSum() function computes a running sum for non-null records in the table.
+menu:
+ v2_0_ref:
+ name: cumulativeSum
+ parent: Transformations
+ weight: 1
+---
+
+The `cumulativeSum()` function computes a running sum for non-null records in the table.
+The output table schema will be the same as the input table.
+
+_**Function type:** Transformation_
+_**Output data type:** Float_
+
+```js
+cumulativeSum(columns: ["_value"])
+```
+
+## Parameters
+
+### columns
+A list of columns on which to operate.
+Defaults to `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) =>
+ r._measurement == "disk" and
+ r._field == "used_percent"
+ )
+ |> cumulativeSum(columns: ["_value"])
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[CUMULATIVE_SUM()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#cumulative-sum)
diff --git a/content/v2.0/reference/flux/functions/transformations/drop.md b/content/v2.0/reference/flux/functions/transformations/drop.md
new file mode 100644
index 000000000..d7c3b2690
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/drop.md
@@ -0,0 +1,61 @@
+---
+title: drop() function
+description: The drop() function removes specified columns from a table.
+menu:
+ v2_0_ref:
+ name: drop
+ parent: Transformations
+ weight: 1
+---
+
+The `drop()` function removes specified columns from a table.
+Columns are specified either through a list or a predicate function.
+When a dropped column is part of the group key, it will be removed from the key.
+If a specified column is not present in a table, it will return an error.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+drop(columns: ["col1", "col2"])
+
+// OR
+
+drop(fn: (column) => column =~ /usage*/)
+```
+
+## Parameters
+
+### columns
+Columns to be removed from the table.
+Cannot be used with `fn`.
+
+_**Data type:** Array of strings_
+
+### fn
+A predicate function which takes a column name as a parameter (`column`) and returns
+a boolean indicating whether or not the column should be removed from the table.
+Cannot be used with `columns`.
+
+_**Data type:** Function_
+
+## Examples
+
+##### Drop a list of columns
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> drop(columns: ["host", "_measurement"])
+```
+
+##### Drop columns matching a predicate
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> drop(fn: (column) => column =~ /usage*/)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[DROP MEASUREMENT](https://docs.influxdata.com/influxdb/latest/query_language/database_management/#delete-measurements-with-drop-measurement)
diff --git a/content/v2.0/reference/flux/functions/transformations/duplicate.md b/content/v2.0/reference/flux/functions/transformations/duplicate.md
new file mode 100644
index 000000000..bb69f165e
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/duplicate.md
@@ -0,0 +1,40 @@
+---
+title: duplicate() function
+description: The duplicate() function duplicates a specified column in a table.
+menu:
+ v2_0_ref:
+ name: duplicate
+ parent: Transformations
+ weight: 1
+---
+
+The `duplicate()` function duplicates a specified column in a table.
+If the specified column is part of the group key, it will be duplicated, but will
+not be part of the output table's group key.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+duplicate(column: "column-name", as: "duplicate-name")
+```
+
+## Parameters
+
+### column
+The column to duplicate.
+
+_**Data type:** String_
+
+### as
+The name assigned to the duplicate column.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start:-5m)
+ |> filter(fn: (r) => r._measurement == "cpu")
+ |> duplicate(column: "host", as: "server")
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/fill.md b/content/v2.0/reference/flux/functions/transformations/fill.md
new file mode 100644
index 000000000..7e221cad7
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/fill.md
@@ -0,0 +1,72 @@
+---
+title: fill() function
+description: The fill() function replaces all null values in an input stream with a non-null value.
+menu:
+ v2_0_ref:
+ name: fill
+ parent: Transformations
+ weight: 1
+---
+
+The `fill()` function replaces all null values in an input stream with a non-null value.
+The output stream is the same as the input stream with all null values replaced in the specified column.
+
+_**Function type:** Transformation_
+
+```js
+fill(column: "_value", value: 0.0)
+
+// OR
+
+fill(column: "_value", usePrevious: true)
+```
+
+## Parameters
+
+### column
+The column in which to replace null values. Defaults to `"_value"`.
+
+_**Data type:** String_
+
+### value
+The constant value to use in place of nulls.
+The value type must match the value type of the `column`.
+
+_**Data type:** Boolean | Integer | UInteger | Float | String | Time | Duration_
+
+### usePrevious
+When `true`, assigns the value set in the previous non-null row.
+
+> Cannot be used with `value`.
+
+_**Data type:** Boolean_
+
+
+## Examples
+
+##### Fill null values with a specified non-null value
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r.cpu == "cpu-total"
+ )
+ |> fill(value: 0.0)
+```
+
+##### Fill null values with the previous non-null value
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r.cpu == "cpu-total"
+ )
+ |> fill(usePrevious: true)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[FILL](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#group-by-time-intervals-and-fill)
diff --git a/content/v2.0/reference/flux/functions/transformations/filter.md b/content/v2.0/reference/flux/functions/transformations/filter.md
new file mode 100644
index 000000000..b56df7562
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/filter.md
@@ -0,0 +1,48 @@
+---
+title: filter() function
+description: The filter() function filters data based on conditions defined in a predicate function (fn).
+menu:
+ v2_0_ref:
+ name: filter
+ parent: Transformations
+ weight: 1
+---
+
+The `filter()` function filters data based on conditions defined in a predicate function ([`fn`](#fn)).
+The output tables have the same schema as the corresponding input tables.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+filter(fn: (r) => r._measurement == "cpu")
+```
+
+## Parameters
+
+### fn
+A single argument function that evaluates true or false.
+Records are passed to the function.
+Those that evaluate to true are included in the output tables.
+
+_**Data type:** Function_
+
+{{% note %}}
+Objects evaluated in `fn` functions are represented by `r`, short for "record" or "row".
+{{% /note %}}
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SELECT](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-basic-select-statement)
diff --git a/content/v2.0/reference/flux/functions/transformations/group.md b/content/v2.0/reference/flux/functions/transformations/group.md
new file mode 100644
index 000000000..ad7a78045
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/group.md
@@ -0,0 +1,80 @@
+---
+title: group() function
+description: The group() function groups records based on their values for specific columns.
+menu:
+ v2_0_ref:
+ name: group
+ parent: Transformations
+ weight: 1
+---
+
+The `group()` function groups records based on their values for specific columns.
+It produces tables with new group keys based on provided properties.
+
+_**Function type:** Transformation_
+
+```js
+group(columns: ["host", "_measurement"], mode:"by")
+
+// OR
+
+group(columns: ["_time"], mode:"except")
+
+// OR
+
+group()
+```
+
+## Parameters
+
+### columns
+List of columns to use in the grouping operation.
+Defaults to `[]`.
+
+_**Data type:** Array of strings_
+
+### mode
+The mode used to group columns.
+
+_**Data type:** String_
+
+The following options are available:
+
+- by
+- except
+
+Defaults to `"by"`.
+
+#### by
+Groups records by columns defined in the [`columns`](#columns) parameter.
+
+#### except
+Groups records by all columns **except** those defined in the [`columns`](#columns) parameter.
+
+## Examples
+
+###### Group by host and measurement
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> group(columns: ["host", "_measurement"])
+```
+
+###### Group by everything except time
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> group(columns: ["_time"], mode: "except")
+```
+
+###### Remove all grouping
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> group()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[GROUP BY](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-group-by-clause) _(similar but different)_
diff --git a/content/v2.0/reference/flux/functions/transformations/histogram.md b/content/v2.0/reference/flux/functions/transformations/histogram.md
new file mode 100644
index 000000000..73fbb230f
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/histogram.md
@@ -0,0 +1,78 @@
+---
+title: histogram() function
+description: The histogram() function approximates the cumulative distribution of a dataset by counting data frequencies for a list of bins.
+menu:
+ v2_0_ref:
+ name: histogram
+ parent: Transformations
+ weight: 1
+---
+
+The `histogram()` function approximates the cumulative distribution of a dataset by counting data frequencies for a list of bins.
+A bin is defined by an upper bound where all data points that are less than or equal to the bound are counted in the bin.
+The bin counts are cumulative.
+
+Each input table is converted into a single output table representing a single histogram.
+The output table has the same group key as the input table.
+Columns not part of the group key are removed and an upper bound column and a count column are added.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+histogram(column: "_value", upperBoundColumn: "le", countColumn: "_value", bins: [50.0, 75.0, 90.0], normalize: false)
+```
+
+## Parameters
+
+### column
+The name of a column containing input data values.
+The column type must be float.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+### upperBoundColumn
+The name of the column in which to store the histogram's upper bounds.
+Defaults to `"le"`.
+
+_**Data type:** String_
+
+### countColumn
+The name of the column in which to store the histogram counts.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+### bins
+A list of upper bounds to use when computing the histogram frequencies.
+Bins should contain a bin whose bound is the maximum value of the data set.
+This value can be set to positive infinity if no maximum is known.
+
+_**Data type:** Array of floats_
+
+#### Bin helper functions
+The following helper functions can be used to generate bins.
+
+[linearBins()](/v2.0/reference/flux/functions/misc/linearbins)
+[logarithmicBins()](/v2.0/reference/flux/functions/misc/logarithmicbins)
+
+### normalize
+When `true`, will convert the counts into frequency values between 0 and 1.
+Defaults to `false`.
+
+_**Data type:** Boolean_
+
+{{% note %}}
+Normalized histograms cannot be aggregated by summing their counts.
+{{% /note %}}
+
+## Examples
+
+##### Histogram with dynamically generated bins
+```js
+// Dynamically generate 10 bins from 0,10,20,...,100
+histogram(
+ bins: linearBins(start:0.0, width:10.0, count:10)
+)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/influxfieldsascols.md b/content/v2.0/reference/flux/functions/transformations/influxfieldsascols.md
new file mode 100644
index 000000000..f0a5ec269
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/influxfieldsascols.md
@@ -0,0 +1,40 @@
+---
+title: influxFieldsAsCols() function
+description: The influxFieldsAsCols() function pivots a table and automatically aligns fields within each input table that have the same timestamp.
+aliases:
+ - /v2.0/reference/flux/functions/inputs/fromrows
+menu:
+ v2_0_ref:
+ name: influxFieldsAsCols
+ parent: Transformations
+ weight: 1
+---
+
+The `influxFieldsAsCols()` function is a special application of the `pivot()` function that
+automatically aligns fields within each input table that have the same timestamp.
+
+_**Function type:** Transformation_
+
+```js
+influxFieldsAsCols()
+```
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start: -1h)
+ |> filter(fn: (r) => r._measurement == "cpu")
+ |> influxFieldsAsCols()
+ |> keep(columns: ["_time", "cpu", "usage_idle", "usage_user"])
+```
+
+## Function definition
+```js
+influxFieldsAsCols = (tables=<-) =>
+ tables
+ |> pivot(
+ rowKey:["_time"],
+ columnKey: ["_field"],
+ valueColumn: "_value"
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/join.md b/content/v2.0/reference/flux/functions/transformations/join.md
new file mode 100644
index 000000000..ceeb95f55
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/join.md
@@ -0,0 +1,133 @@
+---
+title: join() function
+description: The join() function merges two or more input streams whose values are equal on a set of common columns into a single output stream.
+menu:
+ v2_0_ref:
+ name: join
+ parent: Transformations
+ weight: 1
+---
+
+The `join()` function merges two or more input streams whose values are equal on
+a set of common columns into a single output stream.
+Null values are not considered equal when comparing column values.
+The resulting schema is the union of the input schemas.
+The resulting group key is the union of the input group keys.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+join(tables: {key1: table1, key2: table2}, on: ["_time", "_field"], method: "inner")
+```
+
+#### Output schema
+The column schema of the output stream is the union of the input schemas.
+It is also the same for the output group key.
+Columns are renamed using the pattern `<column>_<table>` (for example, `_value_sf`) to prevent ambiguity in joined tables.
+
+##### Example:
+If you have two streams of data, **data_1** and **data_2**, with the following group keys:
+
+**data_1**: `[_time, _field]`
+**data_2**: `[_time, _field]`
+
+And join them with:
+
+```js
+join(tables: {d1: data_1, d2: data_2}, on: ["_time"])
+```
+
+The resulting group keys for all tables will be: `[_time, _field_d1, _field_d2]`
+
+
+## Parameters
+
+### tables
+The map of streams to be joined. Required.
+
+_**Data type:** Object_
+
+> `join()` currently only supports two input streams.
+
+### on
+The list of columns on which to join. Required.
+
+_**Data type:** Array of strings_
+
+### method
+The method used to join. Defaults to `"inner"`.
+
+_**Data type:** String_
+
+###### Possible Values:
+- `inner`
+- `cross`
+- `left`
+- `right`
+- `full`
+
+{{% note %}}
+The `on` parameter and the cross method are mutually exclusive.
+{{% /note %}}
+
+## Examples
+
+#### Example join with sample data
+
+Given the following two streams of data:
+
+##### SF_Temp
+
+| _time | _field | _value |
+| ------ |:------:| -------:|
+| 0001 | "temp" | 70 |
+| 0002 | "temp" | 75 |
+| 0003 | "temp" | 72 |
+
+##### NY_Temp
+
+| _time | _field | _value |
+| ------ |:------:| -------:|
+| 0001 | "temp" | 55 |
+| 0002 | "temp" | 56 |
+| 0003 | "temp" | 55 |
+
+And the following join query:
+
+```js
+join(
+ tables: {sf: SF_Temp, ny: NY_Temp},
+ on: ["_time", "_field"]
+)
+```
+
+The output will be:
+
+| _time | _field | _value_ny | _value_sf |
+| ----- | ------ | ---------:| ---------:|
+| 0001 | "temp" | 55 | 70 |
+| 0002 | "temp" | 56 | 75 |
+| 0003 | "temp" | 55 | 72 |
+
+#### Cross-measurement join
+```js
+data_1 = from(bucket:"telegraf/autogen")
+ |> range(start:-15m)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+
+data_2 = from(bucket:"telegraf/autogen")
+ |> range(start:-15m)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+
+join(
+ tables: {d1: data_1, d2: data_2},
+ on: ["_time", "host"]
+)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/keep.md b/content/v2.0/reference/flux/functions/transformations/keep.md
new file mode 100644
index 000000000..4b8f791f6
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/keep.md
@@ -0,0 +1,55 @@
+---
+title: keep() function
+description: The keep() function returns a table containing only the specified columns.
+menu:
+ v2_0_ref:
+ name: keep
+ parent: Transformations
+ weight: 1
+---
+
+The `keep()` function returns a table containing only the specified columns, ignoring all others.
+Only columns in the group key that are also specified in the `keep()` function will be kept in the resulting group key.
+_It is the inverse of [`drop`](/v2.0/reference/flux/functions/transformations/drop)._
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+keep(columns: ["col1", "col2"])
+
+// OR
+
+keep(fn: (column) => column =~ /inodes*/)
+```
+
+## Parameters
+
+### columns
+Columns that should be included in the resulting table.
+Cannot be used with `fn`.
+
+_**Data type:** Array of strings_
+
+### fn
+A predicate function which takes a column name as a parameter (`column`) and returns
+a boolean indicating whether or not the column should be included in the resulting table.
+Cannot be used with `columns`.
+
+_**Data type:** Function_
+
+## Examples
+
+##### Keep a list of columns
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> keep(columns: ["_time", "_value"])
+```
+
+##### Keep all columns matching a predicate
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> keep(fn: (column) => column =~ /inodes*/)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/keys.md b/content/v2.0/reference/flux/functions/transformations/keys.md
new file mode 100644
index 000000000..ef38e9c94
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/keys.md
@@ -0,0 +1,57 @@
+---
+title: keys() function
+description: >
+ The keys() function outputs the group key of input tables.
+ For each input table, it outputs a table with the same group key columns, plus a
+ _value column containing the labels of the input table's group key.
+menu:
+ v2_0_ref:
+ name: keys
+ parent: Transformations
+ weight: 1
+---
+
+The `keys()` function outputs the group key of input tables.
+For each input table, it outputs a table with the same group key columns, plus a
+`_value` column containing the labels of the input table's group key.
+Each row in an output table contains the group key value and the label of one column in the group key of the input table.
+Each output table has the same number of rows as the size of the group key of the input table.
+
+_**Function type:** Transformation_
+
+```js
+keys(column: "_value")
+```
+
+## Parameters
+
+### column
+The name of the output column in which to store the group key labels.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> keys(column: "keys")
+```
+
+##### Return every possible key in a single table
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> keys()
+ |> keep(columns: ["_value"])
+ |> group()
+ |> distinct()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SHOW MEASUREMENTS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-measurements)
+[SHOW FIELD KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-field-keys)
+[SHOW TAG KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-tag-keys)
+[SHOW SERIES](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-series)
diff --git a/content/v2.0/reference/flux/functions/transformations/keyvalues.md b/content/v2.0/reference/flux/functions/transformations/keyvalues.md
new file mode 100644
index 000000000..8045d5ed4
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/keyvalues.md
@@ -0,0 +1,75 @@
+---
+title: keyValues() function
+description: The keyValues() function returns a table with the input table's group key plus two columns, _key and _value, that correspond to unique column + value pairs from the input table.
+menu:
+ v2_0_ref:
+ name: keyValues
+ parent: Transformations
+ weight: 1
+---
+
+The `keyValues()` function returns a table with the input table's group key plus two columns,
+`_key` and `_value`, that correspond to unique column + value pairs from the input table.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+keyValues(keyColumns: ["usage_idle", "usage_user"])
+
+// OR
+
+keyValues(fn: (schema) => schema.columns |> filter(fn: (r) => r.label =~ /usage_.*/))
+```
+
+## Parameters
+
+{{% note %}}
+`keyColumns` and `fn` are mutually exclusive. Only one may be used at a time.
+{{% /note %}}
+
+### keyColumns
+A list of columns from which values are extracted.
+All columns indicated must be of the same type.
+Each input table must have all of the columns listed by the `keyColumns` parameter.
+
+_**Data type:** Array of strings_
+
+### fn
+Function used to identify a set of columns.
+All columns indicated must be of the same type.
+
+_**Data type:** Function_
+
+## Additional requirements
+
+- Only one of `keyColumns` or `fn` may be used in a single call.
+- All columns indicated must be of the same type.
+- Each input table must have all of the columns listed by the `keyColumns` parameter.
+
+## Examples
+
+##### Get key values from explicitly defined columns
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> filter(fn: (r) => r._measurement == "cpu")
+ |> keyValues(keyColumns: ["usage_idle", "usage_user"])
+```
+
+##### Get key values from columns matching a regular expression
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -30m)
+ |> filter(fn: (r) => r._measurement == "cpu")
+ |> keyValues(fn: (schema) => schema.columns |> filter(fn: (r) => r.label =~ /usage_.*/))
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SHOW MEASUREMENTS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-measurements)
+[SHOW FIELD KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-field-keys)
+[SHOW TAG KEYS](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-tag-keys)
+[SHOW TAG VALUES](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-tag-values)
+[SHOW SERIES](https://docs.influxdata.com/influxdb/latest/query_language/schema_exploration/#show-series)
diff --git a/content/v2.0/reference/flux/functions/transformations/limit.md b/content/v2.0/reference/flux/functions/transformations/limit.md
new file mode 100644
index 000000000..25561ef04
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/limit.md
@@ -0,0 +1,46 @@
+---
+title: limit() function
+description: The limit() function limits the number of records in output tables to a fixed number (n).
+menu:
+ v2_0_ref:
+ name: limit
+ parent: Transformations
+ weight: 1
+---
+
+The `limit()` function limits the number of records in output tables to a fixed number ([`n`](#n)).
+One output table is produced for each input table.
+Each output table contains the first `n` records after the first `offset` records of the input table.
+If the input table has less than `offset + n` records, all records except the first `offset` ones are output.
+
+_**Function type:** Filter_
+_**Output data type:** Object_
+
+```js
+limit(n:10, offset: 0)
+```
+
+## Parameters
+
+### n
+The maximum number of records to output.
+
+_**Data type:** Integer_
+
+### offset
+The number of records to skip per table before limiting to `n`.
+Defaults to `0`.
+
+_**Data type:** Integer_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> limit(n:10, offset: 1)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[LIMIT](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-limit-and-slimit-clauses)
diff --git a/content/v2.0/reference/flux/functions/transformations/map.md b/content/v2.0/reference/flux/functions/transformations/map.md
new file mode 100644
index 000000000..a9a8188e2
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/map.md
@@ -0,0 +1,72 @@
+---
+title: map() function
+description: The map() function applies a function to each record in the input tables.
+menu:
+ v2_0_ref:
+ name: map
+ parent: Transformations
+ weight: 1
+---
+
+The `map()` function applies a function to each record in the input tables.
+The modified records are assigned to new tables based on the group key of the input table.
+The output tables are the result of applying the map function to each record of the input tables.
+
+When the output record contains a different value for the group key, the record is regrouped into the appropriate table.
+When the output record drops a column that was part of the group key, that column is removed from the group key.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+map(fn: (r) => r._value * r._value, mergeKey: true)
+```
+
+## Parameters
+
+### fn
+A single argument function to apply to each record.
+The return value must be an object.
+
+_**Data type:** Function_
+
+{{% note %}}
+Objects evaluated in `fn` functions are represented by `r`, short for "record" or "row".
+{{% /note %}}
+
+### mergeKey
+Indicates if the record returned from `fn` should be merged with the group key.
+When merging, all columns on the group key will be added to the record giving precedence to any columns already present on the record.
+When not merging, only columns defined on the returned record will be present on the output records.
+Defaults to `true`.
+
+_**Data type:** Boolean_
+
+## Examples
+
+###### Square the value of each record
+```js
+from(bucket:"telegraf/autogen")
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system" and
+ r.cpu == "cpu-total"
+ )
+ |> range(start:-12h)
+ |> map(fn: (r) => r._value * r._value)
+```
+
+###### Create a new table with new format
+```js
+from(bucket:"telegraf/autogen")
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> range(start:-12h)
+ // create a new table by copying each row into a new format
+ |> map(fn: (r) => ({
+ _time: r._time,
+ app_server: r.host
+ }))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/pivot.md b/content/v2.0/reference/flux/functions/transformations/pivot.md
new file mode 100644
index 000000000..84438b888
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/pivot.md
@@ -0,0 +1,148 @@
+---
+title: pivot() function
+description: The pivot() function collects values stored vertically (column-wise) in a table and aligns them horizontally (row-wise) into logical sets.
+menu:
+ v2_0_ref:
+ name: pivot
+ parent: Transformations
+ weight: 1
+---
+
+The `pivot()` function collects values stored vertically (column-wise) in a table
+and aligns them horizontally (row-wise) into logical sets.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
+```
+
+The group key of the resulting table is the same as the input tables, excluding columns found in the [`columnKey`](#columnkey) and [`valueColumn`](#valuecolumn) parameters.
+This is because these columns are not part of the resulting output table.
+
+Every input row should have a 1:1 mapping to a particular row + column in the output table, determined by its values for the [`rowKey`](#rowkey) and [`columnKey`](#columnkey) parameters.
+In cases where more than one value is identified for the same row + column pair, the last value
+encountered in the set of table rows is used as the result.
+
+Every input row should have a 1:1 mapping to a particular row/column pair in the output table,
+determined by its values for the `rowKey` and `columnKey`.
+In cases where more than one value is identified for the same row/column pair in the output,
+the last value encountered in the set of table rows is used as the result.
+
+The output is constructed as follows:
+
+- The set of columns for the new table is the `rowKey` unioned with the group key,
+ but excluding the columns indicated by the `columnKey` and the `valueColumn`.
+- A new column is added to the set of columns for each unique value identified
+ in the input by the `columnKey` parameter.
+- The label of a new column is the concatenation of the values of `columnKey` using `_` as a separator.
+ If the value is `null`, `"null"` is used.
+- A new row is created for each unique value identified in the input by the `rowKey` parameter.
+- For each new row, values for group key columns stay the same, while values for new columns are
+ determined from the input tables by the value in `valueColumn` at the row identified by the
+ `rowKey` values and the new column's label.
+ If no value is found, the value is set to `null`.
+
+
+## Parameters
+
+### rowKey
+List of columns used to uniquely identify a row for the output.
+
+_**Data type:** Array of strings_
+
+### columnKey
+List of columns used to pivot values onto each row identified by the rowKey.
+
+_**Data type:** Array of strings_
+
+### valueColumn
+The single column that contains the value to be moved around the pivot.
+
+_**Data type:** String_
+
+## Examples
+
+### Align fields within each measurement that have the same timestamp
+
+```js
+from(bucket:"test")
+ |> range(start: 1970-01-01T00:00:00.000000000Z)
+ |> pivot(
+ rowKey:["_time"],
+ columnKey: ["_field"],
+ valueColumn: "_value"
+ )
+```
+
+###### Input
+| _time | _value | _measurement | _field |
+|:------------------------------:|:------:|:------------:|:------:|
+| 1970-01-01T00:00:00.000000001Z | 1.0 | "m1" | "f1" |
+| 1970-01-01T00:00:00.000000001Z | 2.0 | "m1" | "f2" |
+| 1970-01-01T00:00:00.000000001Z | null | "m1" | "f3" |
+| 1970-01-01T00:00:00.000000001Z | 3.0 | "m1" | null |
+| 1970-01-01T00:00:00.000000002Z | 4.0 | "m1" | "f1" |
+| 1970-01-01T00:00:00.000000002Z | 5.0 | "m1" | "f2" |
+| null | 6.0 | "m1" | "f2" |
+| 1970-01-01T00:00:00.000000002Z | null | "m1" | "f3" |
+| 1970-01-01T00:00:00.000000003Z | null | "m1" | "f1" |
+| 1970-01-01T00:00:00.000000003Z | 7.0 | "m1" | null |
+| 1970-01-01T00:00:00.000000004Z | 8.0 | "m1" | "f3" |
+
+###### Output
+| _time | _measurement | f1 | f2 | f3 | null |
+|:------------------------------:|:------------:|:----:|:----:|:----:|:----:|
+| 1970-01-01T00:00:00.000000001Z | "m1" | 1.0 | 2.0 | null | 3.0 |
+| 1970-01-01T00:00:00.000000002Z | "m1" | 4.0 | 5.0 | null | null |
+| null | "m1" | null | 6.0 | null | null |
+| 1970-01-01T00:00:00.000000003Z | "m1" | null | null | null | 7.0 |
+| 1970-01-01T00:00:00.000000004Z | "m1" | null | null | 8.0 | null |
+
+### Align fields and measurements that have the same timestamp
+
+{{% note %}}
+Note the effects of:
+
+- Having null values in some `columnKey` value;
+- Having more values for the same `rowKey` and `columnKey` value
+ (the 11th row overrides the 10th, and so does the 15th with the 14th).
+{{% /note %}}
+
+```js
+from(bucket:"test")
+ |> range(start: 1970-01-01T00:00:00.000000000Z)
+ |> pivot(
+ rowKey:["_time"],
+ columnKey: ["_measurement", "_field"],
+ valueColumn: "_value"
+ )
+```
+
+###### Input
+| _time | _value | _measurement | _field |
+|:------------------------------:|:------:|:------------:|:------:|
+| 1970-01-01T00:00:00.000000001Z | 1.0 | "m1" | "f1" |
+| 1970-01-01T00:00:00.000000001Z | 2.0 | "m1" | "f2" |
+| 1970-01-01T00:00:00.000000001Z | 3.0 | null | "f3" |
+| 1970-01-01T00:00:00.000000001Z | 4.0 | null | null |
+| 1970-01-01T00:00:00.000000002Z | 5.0 | "m1" | "f1" |
+| 1970-01-01T00:00:00.000000002Z | 6.0 | "m1" | "f2" |
+| 1970-01-01T00:00:00.000000002Z | 7.0 | "m1" | "f3" |
+| 1970-01-01T00:00:00.000000002Z | 8.0 | null | null |
+| null | 9.0 | "m1" | "f3" |
+| 1970-01-01T00:00:00.000000003Z | 10.0 | "m1" | null |
+| 1970-01-01T00:00:00.000000003Z | 11.0 | "m1" | null |
+| 1970-01-01T00:00:00.000000003Z | 12.0 | "m1" | "f3" |
+| 1970-01-01T00:00:00.000000003Z | 13.0 | null | null |
+| null | 14.0 | "m1" | null |
+| null | 15.0 | "m1" | null |
+
+###### Output
+| _time | m1_f1 | m1_f2 | null_f3 | null_null | m1_f3 | m1_null |
+|:------------------------------:|:-----:|:-----:|:---------:|:---------:|:-----:|:-------:|
+| 1970-01-01T00:00:00.000000001Z | 1.0 | 2.0 | 3.0 | 4.0 | null | null |
+| 1970-01-01T00:00:00.000000002Z | 5.0 | 6.0 | null | 8.0 | 7.0 | null |
+| null | null | null | null | null | 9.0 | 15.0 |
+| 1970-01-01T00:00:00.000000003Z | null | null | null | 13.0 | 12.0 | 11.0 |
diff --git a/content/v2.0/reference/flux/functions/transformations/range.md b/content/v2.0/reference/flux/functions/transformations/range.md
new file mode 100644
index 000000000..ee2574b85
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/range.md
@@ -0,0 +1,70 @@
+---
+title: range() function
+description: The range() function filters records based on time bounds.
+menu:
+ v2_0_ref:
+ name: range
+ parent: Transformations
+ weight: 1
+---
+
+The `range()` function filters records based on time bounds.
+Each input table's records are filtered to contain only records that exist within the time bounds.
+Records with a `null` value for their time are filtered.
+Each input table's group key value is modified to fit within the time bounds.
+Tables where all records exist outside the time bounds are filtered entirely.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+range(start: -15m, stop: now)
+```
+
+## Parameters
+
+### start
+Specifies the oldest time to be included in the results.
+
+Relative start times are defined using negative durations.
+Negative durations are relative to now.
+Absolute start times are defined using timestamps.
+
+_**Data type:** Duration or Timestamp_
+
+### stop
+Specifies the exclusive newest time to be included in the results. Defaults to `now`.
+
+Relative stop times are defined using negative durations.
+Negative durations are relative to now.
+Absolute stop times are defined using timestamps.
+
+_**Data type:** Duration or Timestamp_
+
+## Examples
+
+###### Time range relative to now
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h)
+ // ...
+```
+
+###### Relative time range
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h, stop: -15m)
+ // ...
+```
+
+###### Absolute time range
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:2018-05-22T23:30:00Z, stop: 2018-05-23T00:00:00Z)
+ // ...
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[WHERE](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-where-clause)
diff --git a/content/v2.0/reference/flux/functions/transformations/rename.md b/content/v2.0/reference/flux/functions/transformations/rename.md
new file mode 100644
index 000000000..972d21615
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/rename.md
@@ -0,0 +1,57 @@
+---
+title: rename() function
+description: The rename() function renames specified columns in a table.
+menu:
+ v2_0_ref:
+ name: rename
+ parent: Transformations
+ weight: 1
+---
+
+The `rename()` function renames specified columns in a table.
+If a column is renamed and is part of the group key, the column name in the group key will be updated.
+
+There are two variants:
+
+- one which maps old column names to new column names
+- one which takes a mapping function.
+
+_**Function type:** Transformation_
+
+```js
+rename(columns: {host: "server", facility: "datacenter"})
+
+// OR
+
+rename(fn: (column) => column + "_new")
+```
+
+## Parameters
+
+### columns
+A map of columns to rename and their corresponding new names.
+Cannot be used with `fn`.
+
+_**Data type:** Object_
+
+### fn
+A function mapping between old and new column names.
+Cannot be used with `columns`.
+
+_**Data type:** Function_
+
+## Examples
+
+##### Rename a single column
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> rename(columns: {host: "server"})
+```
+
+##### Rename all columns using a function
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> rename(fn: (column) => column + "_new")
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/_index.md b/content/v2.0/reference/flux/functions/transformations/selectors/_index.md
new file mode 100644
index 000000000..c3f75b850
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/_index.md
@@ -0,0 +1,24 @@
+---
+title: Flux selector functions
+description: Flux selector functions return one or more records based on function logic.
+menu:
+ v2_0_ref:
+ parent: Transformations
+ name: Selectors
+ weight: 1
+---
+
+Flux selector functions return one or more records based on function logic.
+The output table is different than the input table, but individual row values are not.
+
+The following selector functions are available:
+
+{{< function-list category="Selectors" menu="v2_0_ref" >}}
+
+
+### Selectors and aggregates
+The following functions can be used as both selectors or aggregates, but they are
+categorized as aggregate functions in this documentation:
+
+- [median](/v2.0/reference/flux/functions/transformations/aggregates/median)
+- [percentile](/v2.0/reference/flux/functions/transformations/aggregates/percentile)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/bottom.md b/content/v2.0/reference/flux/functions/transformations/selectors/bottom.md
new file mode 100644
index 000000000..0bb331126
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/bottom.md
@@ -0,0 +1,60 @@
+---
+title: bottom() function
+description: The bottom() function sorts a table by columns and keeps only the bottom n records.
+menu:
+ v2_0_ref:
+ name: bottom
+ parent: Selectors
+ weight: 1
+---
+
+The `bottom()` function sorts a table by columns and keeps only the bottom `n` records.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+bottom(n:10, columns: ["_value"])
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> bottom(n:10)
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+bottom = (n, columns=["_value"], tables=<-) =>
+ _sortLimit(n:n, columns:columns, desc:false)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[BOTTOM()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#bottom)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/distinct.md b/content/v2.0/reference/flux/functions/transformations/selectors/distinct.md
new file mode 100644
index 000000000..2accafb58
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/distinct.md
@@ -0,0 +1,39 @@
+---
+title: distinct() function
+description: The distinct() function returns the unique values for a given column.
+menu:
+ v2_0_ref:
+ name: distinct
+ parent: Selectors
+ weight: 1
+---
+
+The `distinct()` function returns the unique values for a given column.
+`null` is considered its own distinct value if it is present.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+distinct(column: "host")
+```
+
+## Parameters
+
+### column
+Column on which to track unique values.
+
+_**Data type:** string_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> filter(fn: (r) => r._measurement == "cpu")
+ |> distinct(column: "host")
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[DISTINCT()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#distinct)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/first.md b/content/v2.0/reference/flux/functions/transformations/selectors/first.md
new file mode 100644
index 000000000..8cc9453bd
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/first.md
@@ -0,0 +1,34 @@
+---
+title: first() function
+description: The first() function selects the first non-null record from an input table.
+menu:
+ v2_0_ref:
+ name: first
+ parent: Selectors
+ weight: 1
+---
+
+The `first()` function selects the first non-null record from an input table.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+first()
+```
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> first()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[FIRST()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#first)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/highestaverage.md b/content/v2.0/reference/flux/functions/transformations/selectors/highestaverage.md
new file mode 100644
index 000000000..556ff4788
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/highestaverage.md
@@ -0,0 +1,82 @@
+---
+title: highestAverage() function
+description: The highestAverage() function returns the top 'n' records from all groups using the average of each group.
+menu:
+ v2_0_ref:
+ name: highestAverage
+ parent: Selectors
+ weight: 1
+---
+
+The `highestAverage()` function returns the top `n` records from all groups using the average of each group.
+
+_**Function type:** Selector, Aggregate_
+
+```js
+highestAverage(
+ n:10,
+ columns: ["_value"],
+ groupColumns: []
+)
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### groupColumns
+The columns on which to group before performing the aggregation.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> highestAverage(n:10, groupColumns: ["host"])
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+// _highestOrLowest is a helper function which reduces all groups into a single
+// group by specific tags and a reducer function. It then selects the highest or
+// lowest records based on the columns and the _sortLimit function.
+// The default reducer assumes no reducing needs to be performed.
+_highestOrLowest = (n, _sortLimit, reducer, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> group(columns:groupColumns)
+ |> reducer()
+ |> group(columns:[])
+ |> _sortLimit(n:n, columns:columns)
+
+highestAverage = (n, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> _highestOrLowest(
+ n:n,
+ columns:columns,
+ groupColumns:groupColumns,
+ reducer: (tables=<-) => tables |> mean(columns:[columns[0]]),
+ _sortLimit: top,
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/highestcurrent.md b/content/v2.0/reference/flux/functions/transformations/selectors/highestcurrent.md
new file mode 100644
index 000000000..ba92d92b7
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/highestcurrent.md
@@ -0,0 +1,82 @@
+---
+title: highestCurrent() function
+description: The highestCurrent() function returns the top 'n' records from all groups using the last value of each group.
+menu:
+ v2_0_ref:
+ name: highestCurrent
+ parent: Selectors
+ weight: 1
+---
+
+The `highestCurrent()` function returns the top `n` records from all groups using the last value of each group.
+
+_**Function type:** Selector, Aggregate_
+
+```js
+highestCurrent(
+ n:10,
+ columns: ["_value"],
+ groupColumns: []
+)
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### groupColumns
+The columns on which to group before performing the aggregation.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> highestCurrent(n:10, groupColumns: ["host"])
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+// _highestOrLowest is a helper function which reduces all groups into a single
+// group by specific tags and a reducer function. It then selects the highest or
+// lowest records based on the columns and the _sortLimit function.
+// The default reducer assumes no reducing needs to be performed.
+_highestOrLowest = (n, _sortLimit, reducer, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> group(columns:groupColumns)
+ |> reducer()
+ |> group(columns:[])
+ |> _sortLimit(n:n, columns:columns)
+
+highestCurrent = (n, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> _highestOrLowest(
+ n:n,
+ columns:columns,
+ groupColumns:groupColumns,
+ reducer: (tables=<-) => tables |> last(column:columns[0]),
+ _sortLimit: top,
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/highestmax.md b/content/v2.0/reference/flux/functions/transformations/selectors/highestmax.md
new file mode 100644
index 000000000..c25cb1c2a
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/highestmax.md
@@ -0,0 +1,82 @@
+---
+title: highestMax() function
+description: The highestMax() function returns the top 'n' records from all groups using the maximum of each group.
+menu:
+ v2_0_ref:
+ name: highestMax
+ parent: Selectors
+ weight: 1
+---
+
+The `highestMax()` function returns the top `n` records from all groups using the maximum of each group.
+
+_**Function type:** Selector, Aggregate_
+
+```js
+highestMax(
+ n:10,
+ columns: ["_value"],
+ groupColumns: []
+)
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### groupColumns
+The columns on which to group before performing the aggregation.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> highestMax(n:10, groupColumns: ["host"])
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+// _highestOrLowest is a helper function which reduces all groups into a single
+// group by specific tags and a reducer function. It then selects the highest or
+// lowest records based on the columns and the _sortLimit function.
+// The default reducer assumes no reducing needs to be performed.
+_highestOrLowest = (n, _sortLimit, reducer, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> group(columns:groupColumns)
+ |> reducer()
+ |> group(columns:[])
+ |> _sortLimit(n:n, columns:columns)
+
+highestMax = (n, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> _highestOrLowest(
+ n:n,
+ columns:columns,
+ groupColumns:groupColumns,
+ reducer: (tables=<-) => tables |> max(column:columns[0]),
+ _sortLimit: top
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/last.md b/content/v2.0/reference/flux/functions/transformations/selectors/last.md
new file mode 100644
index 000000000..2b754b1ed
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/last.md
@@ -0,0 +1,34 @@
+---
+title: last() function
+description: The last() function selects the last non-null record from an input table.
+menu:
+ v2_0_ref:
+ name: last
+ parent: Selectors
+ weight: 1
+---
+
+The `last()` function selects the last non-null record from an input table.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+last()
+```
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> last()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[LAST()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#last)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/lowestaverage.md b/content/v2.0/reference/flux/functions/transformations/selectors/lowestaverage.md
new file mode 100644
index 000000000..433c3af76
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/lowestaverage.md
@@ -0,0 +1,83 @@
+---
+title: lowestAverage() function
+description: The lowestAverage() function returns the bottom 'n' records from all groups using the average of each group.
+menu:
+ v2_0_ref:
+ name: lowestAverage
+ parent: Selectors
+ weight: 1
+---
+
+The `lowestAverage()` function returns the bottom `n` records from all groups using the average of each group.
+
+_**Function type:** Selector, Aggregate_
+
+```js
+lowestAverage(
+ n:10,
+ columns: ["_value"],
+ groupColumns: []
+)
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### groupColumns
+The columns on which to group before performing the aggregation.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> lowestAverage(n:10, groupColumns: ["host"])
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+// _highestOrLowest is a helper function which reduces all groups into a single
+// group by specific tags and a reducer function. It then selects the highest or
+// lowest records based on the columns and the _sortLimit function.
+// The default reducer assumes no reducing needs to be performed.
+_highestOrLowest = (n, _sortLimit, reducer, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> group(columns:groupColumns)
+ |> reducer()
+ |> group(columns:[])
+ |> _sortLimit(n:n, columns:columns)
+
+lowestAverage = (n, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> _highestOrLowest(
+ n:n,
+ columns:columns,
+ groupColumns:groupColumns,
+ reducer: (tables=<-) => tables |> mean(columns:[columns[0]]),
+ _sortLimit: bottom,
+ )
+
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/lowestcurrent.md b/content/v2.0/reference/flux/functions/transformations/selectors/lowestcurrent.md
new file mode 100644
index 000000000..207ae46b0
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/lowestcurrent.md
@@ -0,0 +1,82 @@
+---
+title: lowestCurrent() function
+description: The lowestCurrent() function returns the bottom 'n' records from all groups using the last value of each group.
+menu:
+ v2_0_ref:
+ name: lowestCurrent
+ parent: Selectors
+ weight: 1
+---
+
+The `lowestCurrent()` function returns the bottom `n` records from all groups using the last value of each group.
+
+_**Function type:** Selector, Aggregate_
+
+```js
+lowestCurrent(
+ n:10,
+ columns: ["_value"],
+ groupColumns: []
+)
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### groupColumns
+The columns on which to group before performing the aggregation.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> lowestCurrent(n:10, groupColumns: ["host"])
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+// _highestOrLowest is a helper function which reduces all groups into a single
+// group by specific tags and a reducer function. It then selects the highest or
+// lowest records based on the columns and the _sortLimit function.
+// The default reducer assumes no reducing needs to be performed.
+_highestOrLowest = (n, _sortLimit, reducer, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> group(columns:groupColumns)
+ |> reducer()
+ |> group(columns:[])
+ |> _sortLimit(n:n, columns:columns)
+
+lowestCurrent = (n, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> _highestOrLowest(
+ n:n,
+ columns:columns,
+ groupColumns:groupColumns,
+ reducer: (tables=<-) => tables |> last(column:columns[0]),
+ _sortLimit: bottom,
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/lowestmin.md b/content/v2.0/reference/flux/functions/transformations/selectors/lowestmin.md
new file mode 100644
index 000000000..fc81656d1
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/lowestmin.md
@@ -0,0 +1,83 @@
+---
+title: lowestMin() function
+description: The lowestMin() function returns the bottom 'n' records from all groups using the minimum of each group.
+menu:
+ v2_0_ref:
+ name: lowestMin
+ parent: Selectors
+ weight: 1
+---
+
+The `lowestMin()` function returns the bottom `n` records from all groups using the minimum of each group.
+
+_**Function type:** Selector, Aggregate_
+
+```js
+lowestMin(
+ n:10,
+ columns: ["_value"],
+ groupColumns: []
+)
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### groupColumns
+The columns on which to group before performing the aggregation.
+Default is `[]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> lowestMin(n:10, groupColumns: ["host"])
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+// _highestOrLowest is a helper function which reduces all groups into a single
+// group by specific tags and a reducer function. It then selects the highest or
+// lowest records based on the columns and the _sortLimit function.
+// The default reducer assumes no reducing needs to be performed.
+_highestOrLowest = (n, _sortLimit, reducer, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> group(columns:groupColumns)
+ |> reducer()
+ |> group(columns:[])
+ |> _sortLimit(n:n, columns:columns)
+
+lowestMin = (n, columns=["_value"], groupColumns=[], tables=<-) =>
+ tables
+ |> _highestOrLowest(
+ n:n,
+ columns:columns,
+ groupColumns:groupColumns,
+ // TODO(nathanielc): Once max/min support selecting based on multiple columns change this to pass all columns.
+ reducer: (tables=<-) => tables |> min(column:columns[0]),
+ _sortLimit: bottom,
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/max.md b/content/v2.0/reference/flux/functions/transformations/selectors/max.md
new file mode 100644
index 000000000..347c87da1
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/max.md
@@ -0,0 +1,34 @@
+---
+title: max() function
+description: The max() function selects the record with the highest _value from the input table.
+menu:
+ v2_0_ref:
+ name: max
+ parent: Selectors
+ weight: 1
+---
+
+The `max()` function selects the record with the highest `_value` from the input table.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+max()
+```
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> max()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[MAX()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#max)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/min.md b/content/v2.0/reference/flux/functions/transformations/selectors/min.md
new file mode 100644
index 000000000..fa683d403
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/min.md
@@ -0,0 +1,34 @@
+---
+title: min() function
+description: The min() function selects the record with the lowest _value from the input table.
+menu:
+ v2_0_ref:
+ name: min
+ parent: Selectors
+ weight: 1
+---
+
+The `min()` function selects the record with the lowest `_value` from the input table.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+min()
+```
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> min()
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[MIN()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#min)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/sample.md b/content/v2.0/reference/flux/functions/transformations/selectors/sample.md
new file mode 100644
index 000000000..b291c34f8
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/sample.md
@@ -0,0 +1,49 @@
+---
+title: sample() function
+description: The sample() function selects a subset of the records from the input table.
+menu:
+ v2_0_ref:
+ name: sample
+ parent: Selectors
+ weight: 1
+---
+
+The `sample()` function selects a subset of the records from the input table.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+sample(n:5, pos: -1)
+```
+
+## Parameters
+
+### n
+Sample every Nth element.
+
+_**Data type:** Integer_
+
+### pos
+The position offset from the start of results where sampling begins.
+`pos` must be less than `n`.
+If `pos` is less than 0, a random offset is used.
+Defaults to `-1` (random offset).
+
+_**Data type:** Integer_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1d)
+ |> filter(fn: (r) =>
+ r._measurement == "cpu" and
+ r._field == "usage_system"
+ )
+ |> sample(n: 5, pos: 1)
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[SAMPLE()](https://docs.influxdata.com/influxdb/latest/query_language/functions/#sample)
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/top.md b/content/v2.0/reference/flux/functions/transformations/selectors/top.md
new file mode 100644
index 000000000..eae546760
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/top.md
@@ -0,0 +1,54 @@
+---
+title: top() function
+description: The top() function sorts a table by columns and keeps only the top n records.
+menu:
+ v2_0_ref:
+ name: top
+ parent: Selectors
+ weight: 1
+---
+
+The `top()` function sorts a table by columns and keeps only the top `n` records.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+top(n:10, columns: ["_value"])
+```
+
+## Parameters
+
+### n
+Number of records to return.
+
+_**Data type:** Integer_
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-1h)
+ |> filter(fn: (r) =>
+ r._measurement == "mem" and
+ r._field == "used_percent"
+ )
+ |> top(n:10)
+```
+
+## Function definition
+```js
+// _sortLimit is a helper function, which sorts and limits a table.
+_sortLimit = (n, desc, columns=["_value"], tables=<-) =>
+ tables
+ |> sort(columns:columns, desc:desc)
+ |> limit(n:n)
+
+top = (n, columns=["_value"], tables=<-) => tables |> _sortLimit(n:n, columns:columns, desc:true)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/selectors/unique.md b/content/v2.0/reference/flux/functions/transformations/selectors/unique.md
new file mode 100644
index 000000000..67f7a78fe
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/selectors/unique.md
@@ -0,0 +1,34 @@
+---
+title: unique() function
+description: The unique() function returns all records containing unique values in a specified column.
+menu:
+ v2_0_ref:
+ name: unique
+ parent: Selectors
+ weight: 1
+---
+
+The `unique()` function returns all records containing unique values in a specified column.
+
+_**Function type:** Selector_
+_**Output data type:** Object_
+
+```js
+unique(column: "_value")
+```
+
+## Parameters
+
+### column
+The column searched for unique values.
+Defaults to `"_value"`.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -15m)
+ |> filter(fn: (r) => r._measurement == "syslog")
+ |> unique(column: "message")
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/set.md b/content/v2.0/reference/flux/functions/transformations/set.md
new file mode 100644
index 000000000..ccc90dd12
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/set.md
@@ -0,0 +1,38 @@
+---
+title: set() function
+description: The set() function assigns a static value to each record in the input table.
+menu:
+ v2_0_ref:
+ name: set
+ parent: Transformations
+ weight: 1
+---
+
+The `set()` function assigns a static value to each record in the input table.
+The key may modify an existing column or add a new column to the tables.
+If the modified column is part of the group key, the output tables are regrouped as needed.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+set(key: "myKey", value: "myValue")
+```
+
+## Parameters
+
+### key
+The label of the column to modify or set.
+
+_**Data type:** String_
+
+### value
+The string value to set.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "telegraf/autogen")
+ |> set(key: "host", value: "prod-node-1")
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/shift.md b/content/v2.0/reference/flux/functions/transformations/shift.md
new file mode 100644
index 000000000..b00a76346
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/shift.md
@@ -0,0 +1,48 @@
+---
+title: shift() function
+description: The shift() function adds a fixed duration to time columns.
+menu:
+ v2_0_ref:
+ name: shift
+ parent: Transformations
+ weight: 1
+---
+
+The `shift()` function adds a fixed duration to time columns.
+The output table schema is the same as the input table.
+If the time is `null`, the time will continue to be `null`.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+shift(shift: 10h, columns: ["_start", "_stop", "_time"])
+```
+
+## Parameters
+
+### shift
+The amount of time to add to each time value. The shift may be a negative duration.
+
+_**Data type:** Duration_
+
+### columns
+The list of all columns to be shifted. Defaults to `["_start", "_stop", "_time"]`.
+
+_**Data type:** Array of strings_
+
+## Examples
+
+###### Shift forward in time
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> shift(shift: 12h)
+```
+
+###### Shift backward in time
+```js
+from(bucket: "telegraf/autogen")
+ |> range(start: -5m)
+ |> shift(shift: -12h)
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/sort.md b/content/v2.0/reference/flux/functions/transformations/sort.md
new file mode 100644
index 000000000..8b5361423
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/sort.md
@@ -0,0 +1,51 @@
+---
+title: sort() function
+description: The sort() function orders the records within each table.
+menu:
+ v2_0_ref:
+ name: sort
+ parent: Transformations
+ weight: 1
+---
+
+The `sort()` function orders the records within each table.
+One output table is produced for each input table.
+The output tables will have the same schema as their corresponding input tables.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+#### Sorting with null values
+When sorting, `null` values will always be first.
+When `desc: false`, nulls are less than every other value.
+When `desc: true`, nulls are greater than every value.
+
+```js
+sort(columns: ["_value"], desc: false)
+```
+
+## Parameters
+
+### columns
+List of columns by which to sort.
+Sort precedence is determined by list order (left to right).
+Default is `["_value"]`.
+
+_**Data type:** Array of strings_
+
+### desc
+Sort results in descending order.
+Default is `false`.
+
+_**Data type:** Boolean_
+
+## Examples
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h)
+ |> filter(fn: (r) =>
+ r._measurement == "system" and
+ r._field == "uptime"
+ )
+ |> sort(columns:["region", "host", "_value"])
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/statecount.md b/content/v2.0/reference/flux/functions/transformations/statecount.md
new file mode 100644
index 000000000..73b727d79
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/statecount.md
@@ -0,0 +1,51 @@
+---
+title: stateCount() function
+description: The stateCount() function computes the number of consecutive records in a given state.
+menu:
+ v2_0_ref:
+ name: stateCount
+ parent: Transformations
+ weight: 1
+---
+
+The `stateCount()` function computes the number of consecutive records in a given state.
+The state is defined via the function `fn`.
+For each consecutive point that evaluates as `true`, the state count will be incremented.
+When a point evaluates as `false`, the state count is reset.
+The state count is added as an additional column to each record.
+
+_**Function type:** Transformation_
+_**Output data type:** Integer_
+
+```js
+stateCount(fn: (r) => r._field == "state", column: "stateCount")
+```
+
+_If the expression generates an error during evaluation, the point is discarded
+and does not affect the state count._
+
+## Parameters
+
+### fn
+A single argument function that evaluates true or false to identify the state of the record.
+Records are passed to the function.
+Those that evaluate to `true` increment the state count.
+Those that evaluate to `false` reset the state count.
+
+_**Data type:** Function_
+
+### column
+The name of the column added to each record that contains the incremented state count.
+
+_**Data type:** String_
+
+## Examples
+```js
+from(bucket: "monitor/autogen")
+ |> range(start: -1h)
+ |> filter(fn: (r) => r._measurement == "http")
+ |> stateCount(
+ fn: (r) => r.http_response_code == "500",
+ column: "server_error_count"
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/stateduration.md b/content/v2.0/reference/flux/functions/transformations/stateduration.md
new file mode 100644
index 000000000..fc08d283b
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/stateduration.md
@@ -0,0 +1,63 @@
+---
+title: stateDuration() function
+description: The stateDuration() function computes the duration of a given state.
+menu:
+ v2_0_ref:
+ name: stateDuration
+ parent: Transformations
+ weight: 1
+---
+
+The `stateDuration()` function computes the duration of a given state.
+The state is defined via the function `fn`.
+For each consecutive point that evaluates as `true`, the state duration will be
+incremented by the duration between points.
+When a point evaluates as `false`, the state duration is reset.
+The state duration is added as an additional column to each record.
+
+_**Function type:** Transformation_
+_**Output data type:** Duration_
+
+{{% note %}}
+As the first point in the given state has no previous point, its
+state duration will be 0.
+{{% /note %}}
+
+```js
+stateDuration(fn: (r) => r._measurement == "state", column: "stateDuration", unit: 1s)
+```
+
+_If the expression generates an error during evaluation, the point is discarded,
+and does not affect the state duration._
+
+## Parameters
+
+### fn
+A single argument function that evaluates true or false to identify the state of the record.
+Records are passed to the function.
+Those that evaluate to `true` increment the state duration.
+Those that evaluate to `false` reset the state duration.
+
+_**Data type:** Function_
+
+### column
+The name of the column added to each record that contains the state duration.
+
+_**Data type:** String_
+
+### unit
+The unit of time in which the state duration is incremented.
+For example: `1s`, `1m`, `1h`, etc.
+
+_**Data type:** Duration_
+
+## Examples
+```js
+from(bucket: "monitor/autogen")
+ |> range(start: -1h)
+ |> filter(fn: (r) => r._measurement == "http")
+ |> stateDuration(
+ fn: (r) => r.http_response_code == "500",
+ column: "server_error_duration"
+ )
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/_index.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/_index.md
new file mode 100644
index 000000000..5b6e04c44
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/_index.md
@@ -0,0 +1,14 @@
+---
+title: Flux type conversion functions
+description: Flux type conversion functions convert columns of the input table into a specific data type.
+menu:
+ v2_0_ref:
+ parent: Transformations
+ name: Type conversions
+ weight: 1
+---
+
+Flux type conversion functions convert columns of the input table into a specific data type.
+The following type conversion functions are available:
+
+{{< function-list category="Type conversions" menu="v2_0_ref" >}}
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/tobool.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/tobool.md
new file mode 100644
index 000000000..3dc1e308a
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/tobool.md
@@ -0,0 +1,35 @@
+---
+title: toBool() function
+description: The toBool() function converts a value to a boolean.
+menu:
+ v2_0_ref:
+ name: toBool
+ parent: Type conversions
+ weight: 1
+---
+
+The `toBool()` function converts a value to a boolean.
+
+_**Function type:** Type conversion_
+_**Output data type:** Boolean_
+
+```js
+toBool()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toBool()
+```
+
+## Function definition
+```js
+toBool = (tables=<-) =>
+ tables
+ |> map(fn:(r) => bool(v: r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/toduration.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/toduration.md
new file mode 100644
index 000000000..b329dbaeb
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/toduration.md
@@ -0,0 +1,35 @@
+---
+title: toDuration() function
+description: The toDuration() function converts a value to a duration.
+menu:
+ v2_0_ref:
+ name: toDuration
+ parent: Type conversions
+ weight: 1
+---
+
+The `toDuration()` function converts a value to a duration.
+
+_**Function type:** Type conversion_
+_**Output data type:** Duration_
+
+```js
+toDuration()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toDuration()
+```
+
+## Function definition
+```js
+toDuration = (tables=<-) =>
+ tables
+ |> map(fn:(r) => duration(v: r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/tofloat.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/tofloat.md
new file mode 100644
index 000000000..66fba14d8
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/tofloat.md
@@ -0,0 +1,35 @@
+---
+title: toFloat() function
+description: The toFloat() function converts a value to a float.
+menu:
+ v2_0_ref:
+ name: toFloat
+ parent: Type conversions
+ weight: 1
+---
+
+The `toFloat()` function converts a value to a float.
+
+_**Function type:** Type conversion_
+_**Output data type:** Float_
+
+```js
+toFloat()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toFloat()
+```
+
+## Function definition
+```js
+toFloat = (tables=<-) =>
+ tables
+ |> map(fn:(r) => float(v: r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/toint.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/toint.md
new file mode 100644
index 000000000..565920f94
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/toint.md
@@ -0,0 +1,35 @@
+---
+title: toInt() function
+description: The toInt() function converts a value to an integer.
+menu:
+ v2_0_ref:
+ name: toInt
+ parent: Type conversions
+ weight: 1
+---
+
+The `toInt()` function converts a value to an integer.
+
+_**Function type:** Type conversion_
+_**Output data type:** Integer_
+
+```js
+toInt()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toInt()
+```
+
+## Function definition
+```js
+toInt = (tables=<-) =>
+ tables
+ |> map(fn:(r) => int(v: r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/tostring.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/tostring.md
new file mode 100644
index 000000000..89505f6b3
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/tostring.md
@@ -0,0 +1,35 @@
+---
+title: toString() function
+description: The toString() function converts a value to a string.
+menu:
+ v2_0_ref:
+ name: toString
+ parent: Type conversions
+ weight: 1
+---
+
+The `toString()` function converts a value to a string.
+
+_**Function type:** Type conversion_
+_**Output data type:** String_
+
+```js
+toString()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toString()
+```
+
+## Function definition
+```js
+toString = (tables=<-) =>
+ tables
+ |> map(fn:(r) => string(v: r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/totime.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/totime.md
new file mode 100644
index 000000000..0d8fe53a0
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/totime.md
@@ -0,0 +1,35 @@
+---
+title: toTime() function
+description: The toTime() function converts a value to a time.
+menu:
+ v2_0_ref:
+ name: toTime
+ parent: Type conversions
+ weight: 1
+---
+
+The `toTime()` function converts a value to a time.
+
+_**Function type:** Type conversion_
+_**Output data type:** Time_
+
+```js
+toTime()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toTime()
+```
+
+## Function definition
+```js
+toTime = (tables=<-) =>
+ tables
+ |> map(fn:(r) => time(v:r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/type-conversions/touint.md b/content/v2.0/reference/flux/functions/transformations/type-conversions/touint.md
new file mode 100644
index 000000000..ea0368cd2
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/type-conversions/touint.md
@@ -0,0 +1,35 @@
+---
+title: toUInt() function
+description: The toUInt() function converts a value to a UInteger.
+menu:
+ v2_0_ref:
+ name: toUInt
+ parent: Type conversions
+ weight: 1
+---
+
+The `toUInt()` function converts a value to a UInteger.
+
+_**Function type:** Type conversion_
+_**Output data type:** UInteger_
+
+```js
+toUInt()
+```
+
+## Examples
+```js
+from(bucket: "telegraf")
+ |> filter(fn:(r) =>
+ r._measurement == "mem" and
+ r._field == "used"
+ )
+ |> toUInt()
+```
+
+## Function definition
+```js
+toUInt = (tables=<-) =>
+ tables
+ |> map(fn:(r) => uint(v:r._value))
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/union.md b/content/v2.0/reference/flux/functions/transformations/union.md
new file mode 100644
index 000000000..0de8683ae
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/union.md
@@ -0,0 +1,52 @@
+---
+title: union() function
+description: The union() function concatenates two or more input streams into a single output stream.
+menu:
+ v2_0_ref:
+ name: union
+ parent: Transformations
+ weight: 1
+---
+
+The `union()` function concatenates two or more input streams into a single output stream.
+In tables that have identical schemas and group keys, contents of the tables will be concatenated in the output stream.
+The output schema of the `union()` function is the union of all input schemas.
+
+`union()` does not preserve the sort order of the rows within tables.
+A sort operation may be added if a specific sort order is needed.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+union(tables: [table1, table2])
+```
+
+## Parameters
+
+### tables
+Specifies the streams to union together.
+There must be at least two streams.
+
+_**Data type:** Array of streams_
+
+## Examples
+```js
+left = from(bucket: "test")
+ |> range(start: 2018-05-22T19:53:00Z, stop: 2018-05-22T19:53:50Z)
+ |> filter(fn: (r) =>
+ r._field == "usage_guest" or
+ r._field == "usage_guest_nice"
+ )
+ |> drop(columns: ["_start", "_stop"])
+
+right = from(bucket: "test")
+ |> range(start: 2018-05-22T19:53:50Z, stop: 2018-05-22T19:54:20Z)
+ |> filter(fn: (r) =>
+ r._field == "usage_guest" or
+ r._field == "usage_idle"
+ )
+ |> drop(columns: ["_start", "_stop"])
+
+union(tables: [left, right])
+```
diff --git a/content/v2.0/reference/flux/functions/transformations/window.md b/content/v2.0/reference/flux/functions/transformations/window.md
new file mode 100644
index 000000000..e2331f508
--- /dev/null
+++ b/content/v2.0/reference/flux/functions/transformations/window.md
@@ -0,0 +1,118 @@
+---
+title: window() function
+description: The window() function groups records based on a time value.
+menu:
+ v2_0_ref:
+ name: window
+ parent: Transformations
+ weight: 1
+---
+
+The `window()` function groups records based on a time value.
+New columns are added to uniquely identify each window.
+Those columns are added to the group key of the output tables.
+
+A single input record will be placed into zero or more output tables, depending on the specific windowing function.
+
+_**Function type:** Transformation_
+_**Output data type:** Object_
+
+```js
+window(
+ every: 5m,
+ period: 5m,
+ start: 12h,
+ timeColumn: "_time",
+ startColumn: "_start",
+ stopColumn: "_stop"
+)
+
+// OR
+
+window(
+ intervals: intervals(every: 5m, period: 5m, offset: 12h),
+ timeColumn: "_time",
+ startColumn: "_start",
+ stopColumn: "_stop"
+)
+```
+
+## Parameters
+
+{{% note %}}
+`every`, `period`, or `intervals` is required.
+{{% /note %}}
+
+### every
+Duration of time between windows.
+Defaults to `period` value.
+
+_**Data type:** Duration_
+
+### period
+Duration of the window.
+Period is the length of each interval.
+It can be negative, indicating the start and stop boundaries are reversed.
+Defaults to `every` value.
+
+_**Data type:** Duration_
+
+### start
+The start window time relative to the [`location`](/v2.0/reference/flux/language/options/#location) offset.
+It can be negative, indicating that the start goes backwards in time.
+The default aligns the window boundaries with `now`.
+
+_**Data type:** Duration_
+
+### intervals
+A function that returns an interval generator, a set of intervals used as windows.
+
+_**Data type:** Function_
+
+###### Example interval generator function
+```js
+intervals(every:1d, period:8h, offset:9h)
+```
+
+> When `intervals` is used, `every`, `period`, and `start` cannot be used or need to be set to 0.
+
+### timeColumn
+The column containing time.
+Defaults to `"_time"`.
+
+_**Data type:** String_
+
+### startColumn
+The column containing the window start time.
+Defaults to `"_start"`.
+
+_**Data type:** String_
+
+### stopColumn
+The column containing the window stop time.
+Defaults to `"_stop"`.
+
+_**Data type:** String_
+
+## Examples
+
+#### Window data into 10 minute intervals
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h)
+ |> window(every:10m)
+ // ...
+```
+
+#### Window data using intervals function
+The following windows data into 8 hour intervals starting at 9AM every day.
+```js
+from(bucket:"telegraf/autogen")
+ |> range(start:-12h)
+ |> window(intervals: intervals(every:1d, period:8h, offset:9h))
+```
+
+
+
+##### Related InfluxQL functions and statements:
+[GROUP BY time()](https://docs.influxdata.com/influxdb/latest/query_language/data_exploration/#the-group-by-clause)
diff --git a/content/v2.0/reference/flux/language/_index.md b/content/v2.0/reference/flux/language/_index.md
new file mode 100644
index 000000000..d00c46420
--- /dev/null
+++ b/content/v2.0/reference/flux/language/_index.md
@@ -0,0 +1,47 @@
+---
+title: Flux language specification
+description: >
+ Covers the current and future Flux functional data scripting language,
+ which is designed for querying, analyzing, and acting on data.
+menu:
+ v2_0_ref:
+ name: Flux specification
+ parent: Flux query language
+ weight: 5
+---
+
+The following document specifies the Flux language and query execution.
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+The Flux language is centered on querying and manipulating time series data.
+
+### Notation
+The syntax of the language is specified using [Extended Backus-Naur Form (EBNF)](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form):
+
+```js
+Production = production_name "=" [ Expression ] "." .
+Expression = Alternative { "|" Alternative } .
+Alternative = Term { Term } .
+Term = production_name | token [ "…" token ] | Group | Option | Repetition .
+Group = "(" Expression ")" .
+Option = "[" Expression "]" .
+Repetition = "{" Expression "}" .
+```
+
+Productions are expressions constructed from terms and the following operators, in increasing precedence:
+
+```
+| alternation
+() grouping
+[] option (0 or 1 times)
+{} repetition (0 to n times)
+```
+
+Lower-case production names are used to identify lexical tokens.
+Non-terminals are in Camel case.
+Lexical tokens are enclosed in double quotes (`""`) or back quotes (`` ` ``).
diff --git a/content/v2.0/reference/flux/language/assignment-scope.md b/content/v2.0/reference/flux/language/assignment-scope.md
new file mode 100644
index 000000000..e528aa66f
--- /dev/null
+++ b/content/v2.0/reference/flux/language/assignment-scope.md
@@ -0,0 +1,83 @@
+---
+title: Assignment and scope
+description: An assignment binds an identifier to a variable, option, or function. Every identifier in a program must be assigned.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Assignment and scope
+ weight: 20
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+An assignment binds an identifier to a variable, option, or function.
+Every identifier in a program must be assigned.
+
+Flux is lexically scoped using blocks:
+
+1. The scope of a preassigned identifier is in the universe block.
+2. The scope of an identifier denoting a variable, option, or function at the top level (outside any function) is the package block.
+3. The scope of the name of an imported package is the file block of the file containing the import declaration.
+4. The scope of an identifier denoting a function argument is the function body.
+5. The scope of an identifier assigned inside a function is the innermost containing block.
+
+An identifier assigned in a block may be reassigned in an inner block with the exception of option identifiers.
+While the identifier of the inner assignment is in scope, it denotes the entity assigned by the inner assignment.
+
+Note that the package clause is not an assignment.
+The package name does not appear in any scope.
+Its purpose is to identify the files belonging to the same package and to specify the default package name for import declarations.
+
+{{% note %}}
+To be implemented: [IMPL#247](https://github.com/influxdata/platform/issues/247) Add package/namespace support.
+{{% /note %}}
+
+## Variable assignment
+A variable assignment creates a variable bound to an identifier and gives it a type and value.
+A variable keeps the same type and value for the remainder of its lifetime.
+An identifier assigned to a variable in a block cannot be reassigned in the same block.
+An identifier can be reassigned or shadowed in an inner block.
+
+```js
+VariableAssignment = identifier "=" Expression
+```
+
+##### Examples of variable assignment
+
+```js
+n = 1
+m = 2
+x = 5.4
+f = () => {
+ n = "a"
+ m = "b"
+ return n + m
+}
+```
+
+## Option assignment
+```js
+OptionAssignment = "option" [ identifier "." ] identifier "=" Expression
+```
+
+An option assignment creates an option bound to an identifier and gives it a type and a value.
+Options may only be assigned in a package block.
+An identifier assigned to an option may be reassigned a new value but not a new type.
+An option keeps the same type for the remainder of its lifetime.
+
+###### Examples
+```js
+// alert package
+option severity = ["low", "moderate", "high"]
+// foo package
+import "alert"
+option alert.severity = ["low", "critical"] // qualified option
+option n = 1
+option n = 2
+f = (a, b) => a + b + n
+x = f(a:1, b:1) // x = 4
+```
diff --git a/content/v2.0/reference/flux/language/blocks.md b/content/v2.0/reference/flux/language/blocks.md
new file mode 100644
index 000000000..299422884
--- /dev/null
+++ b/content/v2.0/reference/flux/language/blocks.md
@@ -0,0 +1,25 @@
+---
+title: Blocks
+description: A block is a possibly empty sequence of statements within matching braces ({}).
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Blocks
+ weight: 30
+---
+
+A _block_ is a possibly empty sequence of statements within matching braces (`{}`).
+
+```
+Block = "{" StatementList "}" .
+StatementList = { Statement } .
+```
+
+In addition to _explicit blocks_ in the source code, there are _implicit blocks_:
+
+1. The _universe block_ encompasses all Flux source text.
+2. Each package has a _package block_ containing all Flux source text for that package.
+3. Each file has a _file block_ containing all Flux source text in that file.
+4. Each function literal has its own _function block_ even if not explicitly declared.
+
+Blocks nest and influence scoping.
diff --git a/content/v2.0/reference/flux/language/built-ins/_index.md b/content/v2.0/reference/flux/language/built-ins/_index.md
new file mode 100644
index 000000000..9438f2842
--- /dev/null
+++ b/content/v2.0/reference/flux/language/built-ins/_index.md
@@ -0,0 +1,20 @@
+---
+title: Built-ins
+description: >
+ Flux contains many preassigned values.
+ These preassigned values are defined in the source files for the various built-in packages.
+menu:
+ v2_0_ref:
+ name: Built-ins
+ parent: Flux specification
+ weight: 80
+---
+
+Flux contains many preassigned values.
+These preassigned values are defined in the source files for the various built-in packages.
+
+## [System built-ins](/v2.0/reference/flux/language/built-ins/system-built-ins)
+When a built-in value is not expressible in Flux, its value may be defined by the hosting environment.
+
+## [Time constants](/v2.0/reference/flux/language/built-ins/time-constants)
+Flux provides built-in time constants for days of the week and months of the year.
diff --git a/content/v2.0/reference/flux/language/built-ins/system-built-ins.md b/content/v2.0/reference/flux/language/built-ins/system-built-ins.md
new file mode 100644
index 000000000..9727f6305
--- /dev/null
+++ b/content/v2.0/reference/flux/language/built-ins/system-built-ins.md
@@ -0,0 +1,24 @@
+---
+title: System built-ins
+description: >
+ When a built-in value is not expressible in Flux, its value may be defined by the hosting environment.
+ All such values must have a corresponding builtin statement to declare the existence and type of the built-in value.
+menu:
+ v2_0_ref:
+ name: System built-ins
+ parent: Built-ins
+ weight: 80
+---
+
+When a built-in value is not expressible in Flux, its value may be defined by the hosting environment.
+All such values must have a corresponding builtin statement to declare the existence and type of the built-in value.
+
+```js
+BuiltinStatement = "builtin" identifier ":" TypeExpression
+```
+
+##### Example
+
+```js
+builtin from : (bucket: string, bucketID: string) -> stream
+```
diff --git a/content/v2.0/reference/flux/language/built-ins/time-constants.md b/content/v2.0/reference/flux/language/built-ins/time-constants.md
new file mode 100644
index 000000000..835b6c8ad
--- /dev/null
+++ b/content/v2.0/reference/flux/language/built-ins/time-constants.md
@@ -0,0 +1,56 @@
+---
+title: Time constants
+description: >
+ Flux provides built-in time constants for days of the week and months of the year.
+menu:
+ v2_0_ref:
+ name: Time constants
+ parent: Built-ins
+ weight: 80
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+## Days of the week
+Days of the week are represented as integers in the range `[0-6]`.
+The following builtin values are defined:
+
+```js
+Sunday = 0
+Monday = 1
+Tuesday = 2
+Wednesday = 3
+Thursday = 4
+Friday = 5
+Saturday = 6
+```
+
+{{% note %}}
+To be implemented: [IMPL#153](https://github.com/influxdata/flux/issues/153) Add Days of the Week constants
+{{% /note %}}
+
+## Months of the year
+Months are represented as integers in the range `[1-12]`.
+The following builtin values are defined:
+```js
+January = 1
+February = 2
+March = 3
+April = 4
+May = 5
+June = 6
+July = 7
+August = 8
+September = 9
+October = 10
+November = 11
+December = 12
+```
+
+{{% note %}}
+To be implemented: [IMPL#154](https://github.com/influxdata/flux/issues/154) Add Months of the Year constants
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/language/data-model.md b/content/v2.0/reference/flux/language/data-model.md
new file mode 100644
index 000000000..ff3e5ec6a
--- /dev/null
+++ b/content/v2.0/reference/flux/language/data-model.md
@@ -0,0 +1,60 @@
+---
+title: Flux data model
+description: Flux employs a basic data model built from basic data types. The data model consists of tables, records, columns and streams.
+menu:
+ v2_0_ref:
+ name: Data model
+ parent: Flux specification
+ weight: 1
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+Flux employs a basic data model built from basic data types.
+The data model consists of tables, records, columns and streams.
+
+## Record
+A **record** is a tuple of named values and is represented using an object type.
+
+## Column
+A **column** has a label and a data type.
+The available data types for a column are:
+
+| Data type | Description |
+| --------- |:----------- |
+| bool | A boolean value, true or false. |
+| uint | An unsigned 64-bit integer. |
+| int | A signed 64-bit integer. |
+| float | An IEEE-754 64-bit floating-point number. |
+| string | A sequence of unicode characters. |
+| bytes | A sequence of byte values. |
+| time | A nanosecond precision instant in time. |
+| duration | A nanosecond precision duration of time. |
+
+## Table
+A **table** is a set of records with a common set of columns and a group key.
+
+The group key is a list of columns.
+A table's group key denotes which subset of the entire dataset is assigned to the table.
+All records within a table will have the same values for each column that is part of the group key.
+These common values are referred to as the "group key value" and can be represented as a set of key value pairs.
+
+A table's schema consists of its group key and its columns' labels and types.
+
+## Stream of tables
+A **stream** represents a potentially unbounded set of tables.
+A stream is grouped into individual tables using their respective group keys.
+Tables within a stream each have a unique group key value.
+
+## Missing values
+A record may be missing a value for a specific column.
+Missing values are represented with a special `null` value.
+The `null` value can be of any data type.
+
+{{% note %}}
+To be implemented: [IMPL#300](https://github.com/influxdata/platform/issues/300) Design how nulls behave
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/language/expressions.md b/content/v2.0/reference/flux/language/expressions.md
new file mode 100644
index 000000000..e4396bcf0
--- /dev/null
+++ b/content/v2.0/reference/flux/language/expressions.md
@@ -0,0 +1,189 @@
+---
+title: Expressions
+description: An expression specifies the computation of a value by applying the operators and functions to operands.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Expressions
+ weight: 40
+---
+
+An _expression_ specifies the computation of a value by applying the operators and functions to operands.
+
+## Operands and primary expressions
+
+Operands denote the elementary values in an expression.
+
+Primary expressions are the operands for unary and binary expressions.
+A primary expression may be a literal, an identifier denoting a variable, or a parenthesized expression.
+
+```js
+PrimaryExpression = identifier | Literal | "(" Expression ")" .
+```
+
+## Literals
+Literals construct a value.
+
+```js
+Literal = int_lit
+ | float_lit
+ | string_lit
+ | regex_lit
+ | duration_lit
+ | pipe_receive_lit
+ | ObjectLiteral
+ | ArrayLiteral
+ | FunctionLiteral .
+```
+
+### Object literals
+Object literals construct a value with the object type.
+
+```js
+ObjectLiteral = "{" PropertyList "}" .
+PropertyList = [ Property { "," Property } ] .
+Property = identifier [ ":" Expression ]
+ | string_lit ":" Expression .
+```
+
+### Array literals
+
+Array literals construct a value with the array type.
+
+```js
+ArrayLiteral = "[" ExpressionList "]" .
+ExpressionList = [ Expression { "," Expression } ] .
+```
+
+### Function literals
+
+A _function literal_ defines a new function with a body and parameters.
+The function body may be a block or a single expression.
+The function body must have a return statement if it is an explicit block, otherwise the expression is the return value.
+
+```js
+FunctionLiteral = FunctionParameters "=>" FunctionBody .
+FunctionParameters = "(" [ ParameterList [ "," ] ] ")" .
+ParameterList = Parameter { "," Parameter } .
+Parameter = identifier [ "=" Expression ] .
+FunctionBody = Expression | Block .
+```
+
+##### Examples of function literals
+
+```js
+() => 1 // function returns the value 1
+(a, b) => a + b // function returns the sum of a and b
+(x=1, y=1) => x * y // function with default values
+(a, b, c) => { // function with a block body
+ d = a + b
+ return d / c
+}
+
+```
+All function literals are anonymous.
+A function may be given a name using a variable assignment.
+
+```
+add = (a,b) => a + b
+mul = (a,b) => a * b
+```
+
+Function literals are _closures_ and may refer to variables defined in a surrounding block.
+Those variables are shared between the function literal and the surrounding block.
+
+## Call expressions
+
+A _call expression_ invokes a function with the provided arguments.
+Arguments must be specified using the argument name.
+Positional arguments are not supported.
+Argument order does not matter.
+When an argument has a default value, it is not required to be specified.
+
+```js
+CallExpression = "(" PropertyList ")" .
+```
+
+##### Examples of call expressions
+
+```js
+f(a:1, b:9.6)
+float(v:1)
+```
+
+## Pipe expressions
+
+A _pipe expression_ is a call expression with an implicit piped argument.
+Pipe expressions simplify creating long nested call chains.
+
+Pipe expressions pass the result of the left hand expression as the _pipe argument_ to the right hand call expression.
+Function literals specify which if any argument is the pipe argument using the _pipe literal_ as the argument's default value.
+It is an error to use a pipe expression if the function does not declare a pipe argument.
+
+```js
+pipe_receive_lit = "<-" .
+```
+
+##### Examples of pipe expressions
+
+```js
+foo = () => // function body elided
+bar = (x=<-) => // function body elided
+baz = (y=<-) => // function body elided
+foo() |> bar() |> baz() // equivalent to baz(x:bar(y:foo()))
+```
+
+## Index expressions
+Index expressions access a value from an array based on a numeric index.
+
+```js
+IndexExpression = "[" Expression "]" .
+```
+
+## Member expressions
+Member expressions access a property of an object.
+The property being accessed must be either an identifier or a string literal.
+In either case the literal value is the name of the property being accessed, the identifier is not evaluated.
+It is not possible to access an object's property using an arbitrary expression.
+
+```js
+MemberExpression = DotExpression | MemberBracketExpression
+DotExpression = "." identifier
+MemberBracketExpression = "[" string_lit "]" .
+```
+
+### Operators
+Operators combine operands into expressions.
+Operator precedence is encoded directly into the grammar.
+
+```js
+Expression = LogicalExpression .
+LogicalExpression = UnaryLogicalExpression
+ | LogicalExpression LogicalOperator UnaryLogicalExpression .
+LogicalOperator = "and" | "or" .
+UnaryLogicalExpression = ComparisonExpression
+ | UnaryLogicalOperator UnaryLogicalExpression .
+UnaryLogicalOperator = "not" .
+ComparisonExpression = MultiplicativeExpression
+ | ComparisonExpression ComparisonOperator MultiplicativeExpression .
+ComparisonOperator = "==" | "!=" | "<" | "<=" | ">" | ">=" | "=~" | "!~" .
+MultiplicativeExpression = AdditiveExpression
+ | MultiplicativeExpression MultiplicativeOperator AdditiveExpression .
+MultiplicativeOperator = "*" | "/" .
+AdditiveExpression = PipeExpression
+ | AdditiveExpression AdditiveOperator PipeExpression .
+AdditiveOperator = "+" | "-" .
+PipeExpression = PostfixExpression
+ | PipeExpression PipeOperator UnaryExpression .
+PipeOperator = "|>" .
+UnaryExpression = PostfixExpression
+ | PrefixOperator UnaryExpression .
+PrefixOperator = "+" | "-" .
+PostfixExpression = PrimaryExpression
+ | PostfixExpression PostfixOperator .
+PostfixOperator = MemberExpression
+ | CallExpression
+ | IndexExpression .
+```
+
+_Also see [Flux Operators](/v2.0/reference/flux/language/operators)._
diff --git a/content/v2.0/reference/flux/language/lexical-elements.md b/content/v2.0/reference/flux/language/lexical-elements.md
new file mode 100644
index 000000000..841b54a6a
--- /dev/null
+++ b/content/v2.0/reference/flux/language/lexical-elements.md
@@ -0,0 +1,359 @@
+---
+title: Lexical elements
+description: Descriptions of Flux comments, tokens, identifiers, keywords, and other lexical elements.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Lexical elements
+ weight: 50
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+## Comments
+
+Comments serve as documentation.
+Comments begin with the character sequence `//` and stop at the end of the line.
+
+Comments cannot start inside string or regexp literals.
+Comments act like newlines.
+
+## Tokens
+
+Flux is built up from tokens.
+There are four classes of tokens:
+
+* _identifiers_
+* _keywords_
+* _operators_
+* _literals_
+
+
+_White space_ formed from spaces, horizontal tabs, carriage returns, and newlines is ignored except as it separates tokens that would otherwise combine into a single token.
+While breaking the input into tokens, the next token is the longest sequence of characters that form a valid token.
+
+## Identifiers
+
+Identifiers name entities within a program.
+An _identifier_ is a sequence of one or more letters and digits.
+An identifier must start with a letter.
+
+```js
+ identifier = letter { letter | unicode_digit } .
+```
+
+##### Examples of identifiers
+
+```
+a
+_x
+longIdentifierName
+αβ
+```
+
+## Keywords
+
+The following keywords are reserved and may not be used as identifiers:
+
+```
+and import not return option
+empty in or package builtin
+```
+
+{{% note %}}
+To be implemented: [IMPL#256](https://github.com/influxdata/platform/issues/256) Add in and empty operator support.
+
+To be implemented: [IMPL#334](https://github.com/influxdata/platform/issues/334) Add "import" support
+{{% /note %}}
+
+
+## Operators
+
+The following character sequences represent operators:
+
+```
++ == != ( )
+- < !~ [ ]
+* > =~ { }
+/ <= = , :
+% >= <- . |>
+```
+
+## Numeric literals
+
+Numeric literals may be integers or floating point values.
+Literals have arbitrary precision and are coerced to a specific type when used.
+
+The following coercion rules apply to numeric literals:
+
+* An integer literal can be coerced to an "int", "uint", or "float" type,
+* A float literal can be coerced to a "float" type.
+* An error will occur if the coerced type cannot represent the literal value.
+
+{{% note %}}
+To be implemented: [IMPL#255](https://github.com/influxdata/platform/issues/255) Allow numeric literal coercion.
+{{% /note %}}
+
+### Integer literals
+
+An integer literal is a sequence of digits representing an integer value.
+Only decimal integers are supported.
+
+```js
+ int_lit = "0" | decimal_lit .
+ decimal_lit = ( "1" … "9" ) { decimal_digit } .
+```
+
+##### Examples of integer literals
+
+```
+0
+42
+317316873
+```
+
+## Floating-point literals
+
+A _floating-point literal_ is a decimal representation of a floating-point value.
+It has an integer part, a decimal point, and a fractional part.
+The integer and fractional part comprise decimal digits.
+One of the integer part or the fractional part may be elided.
+
+```js
+float_lit = decimals "." [ decimals ]
+ | "." decimals .
+decimals = decimal_digit { decimal_digit } .
+```
+
+##### Examples of floating-point literals
+
+```js
+0.
+72.40
+072.40 // == 72.40
+2.71828
+.26
+```
+
+{{% note %}}
+To be implemented: [IMPL#254](https://github.com/influxdata/platform/issues/254) Parse float literals.
+{{% /note %}}
+
+### Duration literals
+
+A _duration literal_ is a representation of a length of time.
+It has an integer part and a duration unit part.
+Multiple durations may be specified together and the resulting duration is the sum of each smaller part.
+When several durations are specified together, larger units must appear before smaller ones, and there can be no repeated units.
+
+```js
+duration_lit = { int_lit duration_unit } .
+duration_unit = "y" | "mo" | "w" | "d" | "h" | "m" | "s" | "ms" | "us" | "µs" | "ns" .
+```
+
+| Units | Meaning |
+| ----- | ------- |
+| y | year (12 months) |
+| mo | month |
+| w | week (7 days) |
+| d | day |
+| h | hour (60 minutes) |
+| m | minute (60 seconds) |
+| s | second |
+| ms | milliseconds (1 thousandth of a second) |
+| us or µs | microseconds (1 millionth of a second) |
+| ns | nanoseconds (1 billionth of a second) |
+
+Durations represent a length of time.
+Lengths of time are dependent on specific instants in time they occur and as such, durations do not represent a fixed amount of time.
+No amount of seconds is equal to a day, as days vary in their number of seconds.
+No amount of days is equal to a month, as months vary in their number of days.
+A duration consists of three basic time units: seconds, days and months.
+
+Durations can be combined via addition and subtraction.
+Durations can be multiplied by an integer value.
+These operations are performed on each time unit independently.
+
+##### Examples of duration literals
+
+```js
+1s
+10d
+1h15m // 1 hour and 15 minutes
+5w
+1mo5d // 1 month and 5 days
+```
+Durations can be added to date times to produce a new date time.
+
+Addition and subtraction of durations to date times do not commute and are left associative.
+Addition and subtraction of durations to date times applies months, days and seconds in that order.
+When months are added to a date times and the resulting date is past the end of the month, the day is rolled back to the last day of the month.
+
+##### Examples of duration literals
+
+```js
+2018-01-01T00:00:00Z + 1d // 2018-01-02T00:00:00Z
+2018-01-01T00:00:00Z + 1mo // 2018-02-01T00:00:00Z
+2018-01-01T00:00:00Z + 2mo // 2018-03-01T00:00:00Z
+2018-01-31T00:00:00Z + 2mo // 2018-03-31T00:00:00Z
+2018-02-28T00:00:00Z + 2mo // 2018-04-28T00:00:00Z
+2018-01-31T00:00:00Z + 1mo // 2018-02-28T00:00:00Z, February 31st is rolled back to the last day of the month, February 28th in 2018.
+
+// Addition and subtraction of durations to date times does not commute
+2018-02-28T00:00:00Z + 1mo + 1d // 2018-03-29T00:00:00Z
+2018-02-28T00:00:00Z + 1d + 1mo // 2018-04-01T00:00:00Z
+2018-01-01T00:00:00Z + 2mo - 1d // 2018-02-28T00:00:00Z
+2018-01-01T00:00:00Z - 1d + 3mo // 2018-03-31T00:00:00Z
+
+// Addition and subtraction of durations to date times applies months, days and seconds in that order.
+2018-01-28T00:00:00Z + 1mo + 2d // 2018-03-02T00:00:00Z
+2018-01-28T00:00:00Z + 1mo2d // 2018-03-02T00:00:00Z
+2018-01-28T00:00:00Z + 2d + 1mo // 2018-02-28T00:00:00Z, explicit left associative add of 2d first changes the result
+2018-02-01T00:00:00Z + 2mo2d // 2018-04-03T00:00:00Z
+2018-01-01T00:00:00Z + 1mo30d // 2018-03-02T00:00:00Z, Months are applied first to get February 1st, then days are added resulting in March 2 in 2018.
+2018-01-31T00:00:00Z + 1mo1d // 2018-03-01T00:00:00Z, Months are applied first to get February 28th, then days are added resulting in March 1 in 2018.
+```
+
+{{% note %}}
+To be implemented: [IMPL#657](https://github.com/influxdata/platform/issues/657) Implement Duration vectors.
+{{% /note %}}
+
+## Date and time literals
+
+A _date and time literal_ represents a specific moment in time.
+It has a date part, a time part and a time offset part.
+The format follows the [RFC 3339](https://tools.ietf.org/html/rfc3339) specification.
+The time is optional.
+When it is omitted, the time is assumed to be midnight for the default location.
+The `time_offset` is optional.
+When it is omitted, the location option is used to determine the offset.
+
+```js
+date_time_lit = date [ "T" time ] .
+date = year_lit "-" month "-" day .
+year = decimal_digit decimal_digit decimal_digit decimal_digit .
+month = decimal_digit decimal_digit .
+day = decimal_digit decimal_digit .
+time = hour ":" minute ":" second [ fractional_second ] [ time_offset ] .
+hour = decimal_digit decimal_digit .
+minute = decimal_digit decimal_digit .
+second = decimal_digit decimal_digit .
+fractional_second = "." { decimal_digit } .
+time_offset = "Z" | ("+" | "-" ) hour ":" minute .
+```
+
+##### Examples of date and time literals
+
+```js
+1952-01-25T12:35:51Z
+2018-08-15T13:36:23-07:00
+2009-10-15T09:00:00 // October 15th 2009 at 9 AM in the default location
+2018-01-01 // midnight on January 1st 2018 in the default location
+```
+
+{{% note %}}
+To be implemented: [IMPL#152](https://github.com/influxdata/flux/issues/152) Implement shorthand time literals.
+{{% /note %}}
+
+### String literals
+
+A _string literal_ represents a sequence of characters enclosed in double quotes.
+Within the quotes any character may appear except an unescaped double quote.
+String literals support several escape sequences.
+
+```
+\n U+000A line feed or newline
+\r U+000D carriage return
+\t U+0009 horizontal tab
+\" U+0022 double quote
+\\ U+005C backslash
+\{ U+007B open curly bracket
+\} U+007D close curly bracket
+```
+
+Additionally, any byte value may be specified via a hex encoding using `\x` as the prefix.
+
+```
+string_lit = `"` { unicode_value | byte_value | StringExpression | newline } `"` .
+byte_value = `\` "x" hex_digit hex_digit .
+hex_digit = "0" … "9" | "A" … "F" | "a" … "f" .
+unicode_value = unicode_char | escaped_char .
+escaped_char = `\` ( "n" | "r" | "t" | `\` | `"` ) .
+StringExpression = "{" Expression "}" .
+```
+
+{{% note %}}
+To be added: TODO: With string interpolation `string_lit` is no longer a lexical token as part of a literal, but an entire expression in and of itself.
+
+To be implemented: [IMPL#252](https://github.com/influxdata/platform/issues/252) Parse string literals.
+{{% /note %}}
+
+##### Examples of string literals
+
+```js
+"abc"
+"string with double \" quote"
+"string with backslash \\"
+"日本語"
+"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e" // the explicit UTF-8 encoding of the previous line
+```
+
+String literals are also interpolated for embedded expressions to be evaluated as strings.
+Embedded expressions are enclosed in curly brackets (`{}`).
+The expressions are evaluated in the scope containing the string literal.
+The result of an expression is formatted as a string and replaces the string content between the brackets.
+All types are formatted as strings according to their literal representation.
+A function `printf` exists to allow more precise control over formatting of various types.
+To include the literal curly brackets within a string they must be escaped.
+
+{{% note %}}
+To be implemented: [IMPL#248](https://github.com/influxdata/platform/issues/248) Add printf function.
+{{% /note %}}
+
+##### Example: Interpolation
+
+```js
+n = 42
+"the answer is {n}" // the answer is 42
+"the answer is not {n+1}" // the answer is not 43
+"opening curly bracket \{" // opening curly bracket {
+"closing curly bracket \}" // closing curly bracket }
+```
+
+{{% note %}}
+To be implemented: [IMPL#251](https://github.com/influxdata/platform/issues/251) Add string interpolation support
+{{% /note %}}
+
+### Regular expression literals
+
+A _regular expression literal_ represents a regular expression pattern, enclosed in forward slashes.
+Within the forward slashes, any unicode character may appear except for an unescaped forward slash.
+The `\x` hex byte value representation from string literals may also be present.
+
+Regular expression literals support only the following escape sequences:
+
+```
+ \/ U+002f forward slash
+ \\ U+005c backslash
+```
+
+```
+regexp_lit = "/" { unicode_char | byte_value | regexp_escape_char } "/" .
+regexp_escape_char = `\` (`/` | `\`)
+```
+
+##### Examples of regular expression literals
+
+```js
+/.*/
+/http:\/\/localhost:9999/
+/^\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e(ZZ)?$/
+/^日本語(ZZ)?$/ // the above two lines are equivalent
+/\\xZZ/ // this becomes the literal pattern "\xZZ"
+```
+
+The regular expression syntax is defined by [RE2](https://github.com/google/re2/wiki/Syntax).
diff --git a/content/v2.0/reference/flux/language/notation.md b/content/v2.0/reference/flux/language/notation.md
new file mode 100644
index 000000000..3b53e81ce
--- /dev/null
+++ b/content/v2.0/reference/flux/language/notation.md
@@ -0,0 +1,34 @@
+---
+title: Notation
+description: Notation principles for the Flux functional data scripting language.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Notation
+ weight: 60
+---
+
+The syntax of the language is specified using [Extended Backus-Naur Form (EBNF)](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form):
+
+```
+Production = production_name "=" [ Expression ] "." .
+Expression = Alternative { "|" Alternative } .
+Alternative = Term { Term } .
+Term = production_name | token [ "…" token ] | Group | Option | Repetition .
+Group = "(" Expression ")" .
+Option = "[" Expression "]" .
+Repetition = "{" Expression "}" .
+```
+
+A _production_ is an expression constructed from terms and the following operators, in increasing precedence:
+
+```
+| alternation
+() grouping
+[] option (0 or 1 times)
+{} repetition (0 to n times)
+```
+
+Lowercase production names are used to identify lexical tokens.
+Non-terminals are in [camel case](https://en.wikipedia.org/wiki/Camel_case).
+Lexical tokens are enclosed in double quotes (`""`) or back quotes (` `` `).
diff --git a/content/v2.0/reference/flux/language/operators.md b/content/v2.0/reference/flux/language/operators.md
new file mode 100644
index 000000000..8d519c5af
--- /dev/null
+++ b/content/v2.0/reference/flux/language/operators.md
@@ -0,0 +1,105 @@
+---
+title: Operators in the Flux language
+description: Flux supports many types of operators including arithmetic operators, comparison operators, function operators, and others.
+menu:
+ v2_0_ref:
+ name: Operators
+ parent: Flux specification
+ weight: 130
+---
+
+Flux includes the following types of operators:
+
+- [Arithmetic operators](#arithmetic-operators)
+- [Comparison operators](#comparison-operators)
+- [Assignment operators](#assignment-operators)
+- [Function operators](#function-operators)
+- [String Operators](#string-operators)
+- [Literal constructors](#literal-constructors)
+- [Miscellaneous operators](#miscellaneous-operators)
+
+## Arithmetic operators
+Arithmetic operators take two numerical values (either literals or variables) and
+perform a calculation that returns a single numerical value.
+
+| Operator | Description | Example | Result |
+|:--------:| ----------- | ------- | ------ |
+| `+` | Addition | `1 + 1` | `2` |
+| `-` | Subtraction | `3 - 2` | `1` |
+| `*` | Multiplication | `2 * 3` | `6` |
+| `/` | Division | `9 / 3` | `3` |
+| `%` | Modulus | `10 % 5` | `0` |
+
+{{% note %}}
+In the current version of Flux, values used in arithmetic operations must
+be of the same numeric type (integer or float).
+Operations with values of different numeric types will result in a type error.
+{{% /note %}}
+
+## Comparison operators
+Comparison operators compare expressions and return true or false based on the comparison.
+
+| Operator | Description | Example | Result |
+|:--------:| ----------- | ------- | ------ |
+| `==` | Equal to | `"abc" == "abc"` | `true` |
+| `!=` | Not equal to | `"abc" != "def"` | `true` |
+| `<` | Less than | `1 < 2` | `true` |
+| `>` | Greater than | `1 > 2` | `false` |
+| `<=` | Less than or equal | `1 <= 2` | `true` |
+| `>=` | Greater than or equal | `1 >= 2` | `false` |
+| `=~` | Equal to regular expression | `"abc" =~ /[a-z]*/` | `true` |
+| `!~` | Not equal to regular expression | `"abc" !~ /[0-9]*/` | `true` |
+
+{{% note %}}
+The `>` and `<` operators also [compare the lexicographic order of strings](#string-operators).
+{{% /note %}}
+
+## Assignment operators
+An assignment operator assigns a value to its left operand based on the value of its right operand.
+
+| Operator | Description | Example | Meaning |
+|:--------:| ----------- | ------- | ------- |
+| `=` | Assign value of right expression to left expression | `x = y` | x = y |
+
+
+## Function operators
+Function operators facilitate the creation of functions and control the flow of data through operations.
+
+| Operator | Description | Examples | Meaning |
+|:--------: | ----------- | -------- | ------- |
+| `\|>` | Pipe‑forward | `data \|> function()` | Tables contained in the "data" variable are piped into the function. |
+| `<-` | Pipe‑receive | `tables=<-` | The "tables" variable or parameter is assigned to data piped into the operation. _This operator is used for any data type passed into a function; not just table data._ |
+| `=>` | Arrow | `(r) => r.tag1 == "tagvalue"` | The arrow passes an object or parameters into function operations. |
+| `()` | Function call | `top(n:10)` | Call the `top` function setting the `n` parameter to `10` and perform the associated operations. |
+
+---
+
+_See [Custom functions](#) for examples of function operators in use._
+
+---
+
+## String Operators
+String operators concatenate or compare string values.
+
+| Operator | Description | Examples | Result |
+|:--------:| ----------- | -------- | ------ |
+| `+` | Concatenation | `"ab" + "c"` | `"abc"` |
+| `<` | Less than in lexicographic order | `"ant" < "bee"` | `true` |
+| `>` | Greater than in lexicographic order | `"ant" > "bee"` | `false` |
+
+## Literal constructors
+Literal constructors define fixed values.
+
+| Operator | Description |
+|:--------:| ----------- |
+| `[ ]` | List / array |
+| `{ }` | Object |
+| `""` | String |
+
+## Miscellaneous operators
+| Operator | Description | Example |
+|:--------:| ----------- | ------- |
+| `( )` | Logical grouping | `r._value / (r._value * 2)` |
+| `,` | Sequence delimiter | `item1, item2, item3` |
+| `:` | Key-value separator | `{name: "Bob"}` |
+| `.` | Dot reference | `r._measurement` |
diff --git a/content/v2.0/reference/flux/language/options.md b/content/v2.0/reference/flux/language/options.md
new file mode 100644
index 000000000..1715d82ca
--- /dev/null
+++ b/content/v2.0/reference/flux/language/options.md
@@ -0,0 +1,62 @@
+---
+title: Options
+description: >
+ A Flux option represents a storage location for any value of a specified type.
+ Options are mutable. An option can hold different values during its lifetime.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Options
+ weight: 110
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+An option represents a storage location for any value of a specified type.
+Options are mutable.
+An option can hold different values during its lifetime.
+
+Below is a list of built-in options currently implemented in the Flux language:
+
+- now
+- task
+- location
+
+##### now
+The `now` option is a function that returns a time value used as a proxy for the current system time.
+
+```js
+// Query should execute as if the below time is the current system time
+option now = () => 2006-01-02T15:04:05-07:00
+```
+
+##### task
+The `task` option schedules the execution of a Flux query.
+
+```js
+option task = {
+ name: "foo", // Name is required.
+ every: 1h, // Task should be run at this interval.
+ delay: 10m, // Delay scheduling this task by this duration.
+ cron: "0 2 * * *", // Cron is a more sophisticated way to schedule. 'every' and 'cron' are mutually exclusive.
+ retry: 5, // Number of times to retry a failed query.
+}
+```
+
+##### location
+The `location` option sets the default time zone of all times in the script.
+The location maps the UTC offset in use at that location for a given time.
+The default value is set using the time zone of the running process.
+
+```js
+option location = fixedZone(offset:-5h) // Set timezone to be 5 hours west of UTC.
+option location = loadLocation(name:"America/Denver") // Set location to be America/Denver.
+```
+
+{{% note %}}
+To be implemented: [IMPL#660](https://github.com/influxdata/platform/issues/660) Implement Location option
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/language/packages.md b/content/v2.0/reference/flux/language/packages.md
new file mode 100644
index 000000000..b1889f35f
--- /dev/null
+++ b/content/v2.0/reference/flux/language/packages.md
@@ -0,0 +1,71 @@
+---
+title: Packages
+description: >
+ Flux source is organized into packages.
+ A package consists of one or more source files.
+ Each source file is parsed individually and composed into a single package.
+aliases:
+ - /v2.0/reference/flux/language/programs
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Packages
+ weight: 70
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+Flux source is organized into packages.
+A package consists of one or more source files.
+Each source file is parsed individually and composed into a single package.
+
+```js
+File = [ PackageClause ] [ ImportList ] StatementList .
+ImportList = { ImportDeclaration } .
+```
+
+## Package clause
+
+```js
+PackageClause = "package" identifier .
+```
+
+A _package clause_ defines the name for the current package.
+Package names must be valid Flux identifiers.
+The package clause must be at the beginning of any Flux source file.
+All files in the same package must declare the same package name.
+When a file does not declare a package clause, all identifiers in that
+file will belong to the special `main` package.
+
+{{% note %}}
+To be implemented: [IMPL#247](https://github.com/influxdata/platform/issues/247) Add package/namespace support.
+{{% /note %}}
+
+### Package main
+
+The `main` package is special for a few reasons:
+
+1. It defines the entry point of a Flux program.
+2. It cannot be imported.
+3. All statements are marked as producing side effects.
+
+## Package initialization
+
+Packages are initialized in the following order:
+
+1. All imported packages are initialized and assigned to their package identifier.
+2. All option declarations are evaluated and assigned regardless of order. An option cannot have dependencies on any other options assigned in the same package block.
+3. All variable declarations are evaluated and assigned regardless of order. A variable cannot have a direct or indirect dependency on itself.
+4. Any package side effects are evaluated.
+
+A package will only be initialized once across all file blocks and across all package blocks regardless of how many times it is imported.
+
+Initializing imported packages must be deterministic.
+Specifically after all imported packages are initialized, each option must be assigned the same value.
+Packages imported in the same file block are initialized in declaration order.
+Packages imported across different file blocks have no known order.
+When a set of imports modify the same option, they must be ordered by placing them in the same file block.
diff --git a/content/v2.0/reference/flux/language/representation.md b/content/v2.0/reference/flux/language/representation.md
new file mode 100644
index 000000000..9b6a6ddbb
--- /dev/null
+++ b/content/v2.0/reference/flux/language/representation.md
@@ -0,0 +1,37 @@
+---
+title: Representation
+description: Source code is encoded in UTF-8. The text need not be canonicalized.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Representation
+ weight: 80
+---
+
+Source code is encoded in UTF-8.
+The text need not be canonicalized.
+
+## Characters
+
+This document will use the term _character_ to refer to a Unicode code point.
+
+The following terms are used to denote specific Unicode character classes:
+
+```
+newline = /* the Unicode code point U+000A */ .
+unicode_char = /* an arbitrary Unicode code point except newline */ .
+unicode_letter = /* a Unicode code point classified as "Letter" */ .
+unicode_digit = /* a Unicode code point classified as "Number, decimal digit" */ .
+```
+
+In The Unicode Standard 8.0, Section 4.5, "General Category" defines a set of character categories.
+Flux treats all characters in any of the Letter categories (Lu, Ll, Lt, Lm, or Lo) as Unicode letters, and those in the Number category (Nd) as Unicode digits.
+
+### Letters and digits
+
+The underscore character `_` (`U+005F`) is considered a letter.
+
+```
+letter = unicode_letter | "_" .
+decimal_digit = "0" … "9" .
+```
diff --git a/content/v2.0/reference/flux/language/side-effects.md b/content/v2.0/reference/flux/language/side-effects.md
new file mode 100644
index 000000000..14054928c
--- /dev/null
+++ b/content/v2.0/reference/flux/language/side-effects.md
@@ -0,0 +1,16 @@
+---
+title: Side effects
+description: A summary of side effects in the Flux functional data scripting language.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Side effects
+ weight: 90
+---
+
+Side effects can occur in one of two ways.
+
+1. By reassigning built-in options
+2. By calling a function that produces side effects
+
+A function produces side effects when it is explicitly declared to have side effects or when it calls a function that itself produces side effects.
diff --git a/content/v2.0/reference/flux/language/statements.md b/content/v2.0/reference/flux/language/statements.md
new file mode 100644
index 000000000..c0e8cd2d8
--- /dev/null
+++ b/content/v2.0/reference/flux/language/statements.md
@@ -0,0 +1,161 @@
+---
+title: Statements
+description: Statements control execution in the Flux functional data scripting language.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Statements
+ weight: 100
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+A _statement_ controls execution.
+
+```js
+Statement = OptionAssignment
+ | BuiltinStatement
+ | VariableAssignment
+ | ReturnStatement
+ | ExpressionStatement .
+```
+
+## Import declaration
+
+```js
+ImportDeclaration = "import" [identifier] string_lit
+```
+
+A package name and an import path are associated with every package.
+The import statement takes a package's import path and brings all of the identifiers
+defined in that package into the current scope under a namespace.
+The import statement defines the namespace through which to access the imported identifiers.
+By default the identifier of this namespace is the package name unless otherwise specified.
+For example, given a variable `x` declared in package `foo`, importing `foo` and referencing `x` would look like this:
+
+```js
+import "import/path/to/package/foo"
+
+foo.x
+```
+
+Or this:
+
+```js
+import bar "import/path/to/package/foo"
+
+bar.x
+```
+
+A package's import path is always absolute.
+A package may reassign a new value to an option identifier declared in one of its imported packages.
+A package cannot access nor modify the identifiers belonging to the imported packages of its imported packages.
+Every statement contained in an imported package is evaluated.
+
+## Return statements
+
+A terminating statement prevents execution of all statements that appear after it in the same block.
+A return statement is a terminating statement.
+
+```
+ReturnStatement = "return" Expression .
+```
+## Expression statements
+
+An _expression statement_ is an expression where the computed value is discarded.
+
+```
+ExpressionStatement = Expression .
+```
+
+##### Examples of expression statements
+
+```js
+1 + 1
+f()
+a
+```
+
+## Named types
+
+A named type can be created using a type assignment statement.
+A named type is equivalent to the type it describes and may be used interchangeably.
+
+```js
+TypeAssignment = "type" identifier "=" TypeExpression
+TypeExpression = identifier
+ | TypeParameter
+ | ObjectType
+ | ArrayType
+ | GeneratorType
+ | FunctionType .
+TypeParameter = "'" identifier .
+ObjectType = "{" PropertyTypeList [";" ObjectUpperBound ] "}" .
+ObjectUpperBound = "any" | PropertyTypeList .
+PropertyTypeList = PropertyType [ "," PropertyType ] .
+PropertyType = identifier ":" TypeExpression
+ | string_lit ":" TypeExpression .
+ArrayType = "[]" TypeExpression .
+GeneratorType = "[...]" TypeExpression .
+FunctionType = ParameterTypeList "->" TypeExpression
+ParameterTypeList = "(" [ ParameterType { "," ParameterType } ] ")" .
+ParameterType = identifier ":" [ pipe_receive_lit ] TypeExpression .
+```
+
+Named types are a separate namespace from values.
+It is possible for a value and a type to have the same identifier.
+The following named types are built-in.
+
+```js
+bool // boolean
+int // integer
+uint // unsigned integer
+float // floating point number
+duration // duration of time
+time // time
+string // utf-8 encoded string
+regexp // regular expression
+type // a type that itself describes a type
+```
+
+When an object's upper bound is not specified, it is assumed to be equal to its lower bound.
+
+Parameters to function types define whether the parameter is a pipe forward
+parameter and whether the parameter has a default value.
+The `<-` indicates the parameter is the pipe forward parameter.
+
+###### Examples
+```js
+ // alias the bool type
+ type boolean = bool
+
+ // define a person as an object type
+ type person = {
+ name: string,
+ age: int,
+ }
+
+ // Define addition on ints
+ type intAdd = (a: int, b: int) -> int
+
+ // Define polymorphic addition
+ type add = (a: 'a, b: 'a) -> 'a
+
+  // Define function with pipe parameter
+ type bar = (foo: <-string) -> string
+
+ // Define object type with an empty lower bound and an explicit upper bound
+ type address = {
+ ;
+ street: string,
+ city: string,
+ state: string,
+ country: string,
+ province: string,
+ zip: int,
+ }
+```
diff --git a/content/v2.0/reference/flux/language/types.md b/content/v2.0/reference/flux/language/types.md
new file mode 100644
index 000000000..8e4a346d0
--- /dev/null
+++ b/content/v2.0/reference/flux/language/types.md
@@ -0,0 +1,102 @@
+---
+title: Types
+description: A type defines the set of values and operations on those values. Types are never explicitly declared as part of the syntax. Types are always inferred from the usage of the value.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Types
+ weight: 110
+---
+
+{{% note %}}
+This document is a living document and may not represent the current implementation of Flux.
+Any section that is not currently implemented is commented with a **[IMPL#XXX]** where
+**XXX** is an issue number tracking discussion and progress towards implementation.
+{{% /note %}}
+
+A _type_ defines the set of values and operations on those values.
+Types are never explicitly declared as part of the syntax.
+Types are always inferred from the usage of the value.
+
+{{% note %}}
+To be implemented: [IMPL#249](https://github.com/influxdata/platform/issues/249) Specify type inference rules.
+{{% /note %}}
+
+## Boolean types
+
+A _boolean type_ represents a truth value, corresponding to the preassigned variables `true` and `false`.
+The boolean type name is `bool`.
+
+## Numeric types
+
+A _numeric type_ represents sets of integer or floating-point values.
+
+The following numeric types exist:
+
+```
+uint the set of all unsigned 64-bit integers
+int the set of all signed 64-bit integers
+float the set of all IEEE-754 64-bit floating-point numbers
+```
+
+## Time types
+
+A _time type_ represents a single point in time with nanosecond precision.
+The time type name is `time`.
+
+## Duration types
+
+A _duration type_ represents a length of time with nanosecond precision.
+The duration type name is `duration`.
+
+Durations can be added to times to produce a new time.
+
+##### Examples of duration types
+
+```js
+2018-07-01T00:00:00Z + 1mo // 2018-08-01T00:00:00Z
+2018-07-01T00:00:00Z + 2y // 2020-07-01T00:00:00Z
+2018-07-01T00:00:00Z + 5h // 2018-07-01T05:00:00Z
+```
+
+## String types
+
+A _string type_ represents a possibly empty sequence of characters.
+Strings are immutable and cannot be modified once created.
+The string type name is `string`.
+
+The length of a string is its size in bytes, not the number of characters, since a single character may be multiple bytes.
+
+## Regular expression types
+
+A _regular expression type_ represents the set of all patterns for regular expressions.
+The regular expression type name is `regexp`.
+
+## Array types
+
+An _array type_ represents a sequence of values of any other type.
+All values in the array must be of the same type.
+The length of an array is the number of elements in the array.
+
+## Object types
+
+An _object type_ represents a set of unordered key and value pairs.
+The key must always be a string.
+The value may be any other type, and need not be the same as other values within the object.
+
+## Function types
+
+A _function type_ represents a set of all functions with the same argument and result types.
+
+{{% note %}}
+To be implemented: [IMPL#249](https://github.com/influxdata/platform/issues/249) Specify type inference rules.
+{{% /note %}}
+
+## Generator types
+
+A _generator type_ represents a value that produces an unknown number of other values.
+The generated values may be of any other type, but must all be the same type.
+
+{{% note %}}
+To be implemented: [IMPL#658](https://github.com/influxdata/platform/issues/658) Implement Generator types.
+{{% /note %}}
diff --git a/content/v2.0/reference/flux/language/variables.md b/content/v2.0/reference/flux/language/variables.md
new file mode 100644
index 000000000..9ea0e5ebe
--- /dev/null
+++ b/content/v2.0/reference/flux/language/variables.md
@@ -0,0 +1,13 @@
+---
+title: Variables
+description: Flux variables hold values. A variable can only hold values defined by its type.
+menu:
+ v2_0_ref:
+ parent: Flux specification
+ name: Variables
+ weight: 120
+---
+
+A **variable** represents a storage location for a single value.
+Variables are immutable.
+Once a variable is given a value, it holds that value for the remainder of its lifetime.
diff --git a/layouts/shortcodes/children.html b/layouts/shortcodes/children.html
new file mode 100644
index 000000000..6c0919e2a
--- /dev/null
+++ b/layouts/shortcodes/children.html
@@ -0,0 +1,90 @@
+{{ $showhidden := .Get "showhidden"}}
+{{ $style := .Get "style" | default "h3" }}
+{{ $depth := .Get "depth" | default 1 }}
+{{ $withDescription := .Get "description" | default true }}
+{{ $sortTerm := .Get "sort" | default "Weight" }}
+
+
+
+ {{ .Scratch.Set "pages" .Page.Pages }}
+ {{ if .Page.Sections}}
+ {{ .Scratch.Set "pages" (.Page.Pages | union .Page.Sections) }}
+ {{end}}
+ {{ $pages := (.Scratch.Get "pages") }}
+
+ {{if eq $sortTerm "Weight"}}
+ {{template "childs" dict "menu" $pages.ByWeight "style" $style "showhidden" $showhidden "count" 1 "depth" $depth "pages" .Site.Pages "description" $withDescription "sortTerm" $sortTerm}}
+ {{else if eq $sortTerm "Name"}}
+ {{template "childs" dict "menu" $pages.ByTitle "style" $style "showhidden" $showhidden "count" 1 "depth" $depth "pages" .Site.Pages "description" $withDescription "sortTerm" $sortTerm}}
+ {{else if eq $sortTerm "PublishDate"}}
+ {{template "childs" dict "menu" $pages.ByPublishDate "style" $style "showhidden" $showhidden "count" 1 "depth" $depth "pages" .Site.Pages "description" $withDescription "sortTerm" $sortTerm}}
+ {{else if eq $sortTerm "Date"}}
+ {{template "childs" dict "menu" $pages.ByDate "style" $style "showhidden" $showhidden "count" 1 "depth" $depth "pages" .Site.Pages "description" $withDescription "sortTerm" $sortTerm}}
+ {{else if eq $sortTerm "Length"}}
+ {{template "childs" dict "menu" $pages.ByLength "style" $style "showhidden" $showhidden "count" 1 "depth" $depth "pages" .Site.Pages "description" $withDescription "sortTerm" $sortTerm}}
+ {{else}}
+ {{template "childs" dict "menu" $pages "style" $style "showhidden" $showhidden "count" 1 "depth" $depth "pages" .Site.Pages "description" $withDescription "sortTerm" $sortTerm}}
+ {{end}}
+
+
+{{.Inner|safeHTML}}
+
+{{ define "childs" }}
+ {{ range .menu }}
+ {{ if and .Params.hidden (not $.showhidden) }}
+ {{else}}
+
+
+{{if hasPrefix $.style "h"}}
+ {{$num := sub ( int (trim $.style "h") ) 1 }}
+ {{$numn := add $num $.count }}
+
+{{(printf "<h%d>" $numn)|safeHTML}}
+<a href="{{ .RelPermalink }}">{{ .Title }}</a>
+{{(printf "</h%d>" $numn)|safeHTML}}
+
+{{else}}
+{{(printf "<%s>" $.style)|safeHTML}}
+<a href="{{ .RelPermalink }}">{{ .Title }}</a>
+{{(printf "</%s>" $.style)|safeHTML}}
+{{end}}
+
+ {{if $.description}}
+ {{if .Description}}
+{{.Description}}
+ {{else}}
+{{.Summary}}
+ {{end}}
+ {{end}}
+
+ {{ if lt $.count $.depth}}
+{{if eq $.style "li"}}
+
+{{end}}
+ {{ $.Page.Scratch.Set "pages" .Pages }}
+ {{ if .Sections}}
+ {{ $.Page.Scratch.Set "pages" (.Pages | union .Sections) }}
+ {{end}}
+ {{ $pages := ($.Page.Scratch.Get "pages") }}
+
+ {{if eq $.sortTerm "Weight"}}
+ {{template "childs" dict "menu" $pages.ByWeight "style" $.style "showhidden" $.showhidden "count" (add $.count 1) "depth" $.depth "pages" $.pages "description" $.description "sortTerm" $.sortTerm}}
+ {{else if eq $.sortTerm "Name"}}
+ {{template "childs" dict "menu" $pages.ByTitle "style" $.style "showhidden" $.showhidden "count" (add $.count 1) "depth" $.depth "pages" $.pages "description" $.description "sortTerm" $.sortTerm}}
+ {{else if eq $.sortTerm "PublishDate"}}
+ {{template "childs" dict "menu" $pages.ByPublishDate "style" $.style "showhidden" $.showhidden "count" (add $.count 1) "depth" $.depth "pages" $.pages "description" $.description "sortTerm" $.sortTerm}}
+ {{else if eq $.sortTerm "Date"}}
+ {{template "childs" dict "menu" $pages.ByDate "style" $.style "showhidden" $.showhidden "count" (add $.count 1) "depth" $.depth "pages" $.pages "description" $.description "sortTerm" $.sortTerm}}
+ {{else if eq $.sortTerm "Length"}}
+ {{template "childs" dict "menu" $pages.ByLength "style" $.style "showhidden" $.showhidden "count" (add $.count 1) "depth" $.depth "pages" $.pages "description" $.description "sortTerm" $.sortTerm}}
+ {{else}}
+ {{template "childs" dict "menu" $pages "style" $.style "showhidden" $.showhidden "count" (add $.count 1) "depth" $.depth "pages" $.pages "description" $.description "sortTerm" $.sortTerm}}
+ {{end}}
+{{if eq $.style "li"}}
+
+{{end}}
+ {{end}}
+
+ {{end}}
+ {{end}}
+{{end}}
diff --git a/layouts/shortcodes/function-list.html b/layouts/shortcodes/function-list.html
new file mode 100644
index 000000000..34a256068
--- /dev/null
+++ b/layouts/shortcodes/function-list.html
@@ -0,0 +1,33 @@
+{{ $category := (.Get "category") }}{{/* shortcode param: menu-entry name whose leaf children get listed */}}
+{{ $menu := (.Get "menu")}}{{/* shortcode param: name of the site menu (key into .Site.Menus) to walk */}}
+{{/* function-list shortcode: emits a Markdown bullet list of every leaf entry (an entry with no children) filed under $category in $menu. NOTE(review): the traversal below is hand-unrolled to exactly three nesting levels, so entries nested deeper than three levels are silently omitted — confirm site menus never go deeper. */}}
+{{ range (index .Site.Menus $menu) }}
+  {{ if .HasChildren}}
+    {{ range .Children }}{{/* level 1: children of a top-level menu entry */}}
+      {{ if eq .Parent $category }}{{/* keep only entries whose parent is the requested category */}}
+        {{ if not .HasChildren }}
+          - {{ .Name }}{{/* leaf: emit one Markdown list item */}}
+        {{ end }}
+      {{ end }}
+      {{ if .HasChildren}}
+        {{ range .Children }}{{/* level 2: same parent-equals-category filter repeated verbatim; NOTE(review): .Parent at this depth is the level-1 entry's name, so this matches only if $category also appears here — verify intended */}}
+          {{ if eq .Parent $category }}
+            {{ if not .HasChildren }}
+              - {{ .Name }}
+            {{ end }}
+          {{ end }}
+          {{ if .HasChildren}}
+            {{ range .Children }}{{/* level 3: deepest level this shortcode handles */}}
+              {{ if eq .Parent $category }}
+                {{ if not .HasChildren }}
+                  - {{ .Name }}
+                {{ end }}
+              {{ end }}
+            {{ end }}
+          {{ end }}
+        {{ end }}
+      {{ end }}
+    {{ end }}
+  {{ end }}
+{{ end }}
+
diff --git a/static/img/flux-windowed-aggregates-ungrouped.png b/static/img/flux-windowed-aggregates-ungrouped.png
new file mode 100644
index 000000000..1b5ff82a6
Binary files /dev/null and b/static/img/flux-windowed-aggregates-ungrouped.png differ
diff --git a/static/img/flux-windowed-aggregates.png b/static/img/flux-windowed-aggregates.png
new file mode 100644
index 000000000..1f1c6bdab
Binary files /dev/null and b/static/img/flux-windowed-aggregates.png differ
diff --git a/static/img/flux-windowed-data.png b/static/img/flux-windowed-data.png
new file mode 100644
index 000000000..de05d3716
Binary files /dev/null and b/static/img/flux-windowed-data.png differ
diff --git a/static/img/grouping-by-cpu-time.png b/static/img/grouping-by-cpu-time.png
new file mode 100644
index 000000000..05d896df7
Binary files /dev/null and b/static/img/grouping-by-cpu-time.png differ
diff --git a/static/img/grouping-by-time.png b/static/img/grouping-by-time.png
new file mode 100644
index 000000000..e6914c377
Binary files /dev/null and b/static/img/grouping-by-time.png differ
diff --git a/static/img/grouping-data-set.png b/static/img/grouping-data-set.png
new file mode 100644
index 000000000..211701fa6
Binary files /dev/null and b/static/img/grouping-data-set.png differ
diff --git a/static/img/simple-unwindowed-data.png b/static/img/simple-unwindowed-data.png
new file mode 100644
index 000000000..bbb74acc4
Binary files /dev/null and b/static/img/simple-unwindowed-data.png differ
diff --git a/static/img/simple-windowed-aggregate-data.png b/static/img/simple-windowed-aggregate-data.png
new file mode 100644
index 000000000..830ac6b92
Binary files /dev/null and b/static/img/simple-windowed-aggregate-data.png differ
diff --git a/static/img/simple-windowed-data.png b/static/img/simple-windowed-data.png
new file mode 100644
index 000000000..304274b3c
Binary files /dev/null and b/static/img/simple-windowed-data.png differ